diff --git a/src/Plankton.sln b/src/Plankton.sln index 98dffd3..05f79c9 100644 --- a/src/Plankton.sln +++ b/src/Plankton.sln @@ -1,32 +1,58 @@ - -Microsoft Visual Studio Solution File, Format Version 11.00 -# Visual C# Express 2010 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Plankton", "Plankton\Plankton.csproj", "{BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PlanktonGh", "PlanktonGh\PlanktonGh.csproj", "{9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PlanktonTests", "PlanktonTests\PlanktonTests.csproj", "{8FC24017-EDF3-4747-B966-C29DBD18103F}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug|Any CPU.Build.0 = Debug|Any CPU - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Release|Any CPU.ActiveCfg = Release|Any CPU - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Release|Any CPU.Build.0 = Release|Any CPU - {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Release|Any CPU.Build.0 = Release|Any CPU - {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {8FC24017-EDF3-4747-B966-C29DBD18103F}.Release|Any CPU.Build.0 = Release|Any CPU - {8FC24017-EDF3-4747-B966-C29DBD18103F}.Release|Any CPU.ActiveCfg = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.24720.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Plankton", "Plankton\Plankton.csproj", "{BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PlanktonGh", "PlanktonGh\PlanktonGh.csproj", "{9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PlanktonTests", "PlanktonTests\PlanktonTests.csproj", "{8FC24017-EDF3-4747-B966-C29DBD18103F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PlanktonFold", "PlanktonFold\PlanktonFold.csproj", "{066DA097-92CE-4E8D-BE88-8C875AFDC204}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug32|Any CPU = Debug32|Any CPU + Debug64|Any CPU = Debug64|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug32|Any CPU.ActiveCfg = Debug|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug32|Any CPU.Build.0 = Debug|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug64|Any CPU.ActiveCfg = Debug|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Debug64|Any 
CPU.Build.0 = Debug|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F}.Release|Any CPU.Build.0 = Release|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug32|Any CPU.ActiveCfg = Debug|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug32|Any CPU.Build.0 = Debug|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug64|Any CPU.ActiveCfg = Debug|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Debug64|Any CPU.Build.0 = Debug|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E}.Release|Any CPU.Build.0 = Release|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug32|Any CPU.ActiveCfg = Debug|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug32|Any CPU.Build.0 = Debug|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug64|Any CPU.ActiveCfg = Debug|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Debug64|Any CPU.Build.0 = Debug|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {8FC24017-EDF3-4747-B966-C29DBD18103F}.Release|Any CPU.Build.0 = Release|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Debug|Any CPU.ActiveCfg = Debug64|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Debug|Any CPU.Build.0 = Debug64|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Debug32|Any CPU.ActiveCfg = Debug32|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Debug32|Any CPU.Build.0 = Debug32|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Debug64|Any CPU.ActiveCfg = Debug64|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Debug64|Any CPU.Build.0 = Debug64|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Release|Any CPU.ActiveCfg = Release|Any CPU + {066DA097-92CE-4E8D-BE88-8C875AFDC204}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/src/Plankton/Plankton.csproj b/src/Plankton/Plankton.csproj index 0985ae1..1816f31 100644 --- a/src/Plankton/Plankton.csproj +++ b/src/Plankton/Plankton.csproj @@ -1,60 +1,78 @@ - - - - Debug - AnyCPU - 8.0.30703 - 2.0 - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F} - Library - Properties - Plankton - Plankton - v4.0 - 512 - - - true - full - false - ..\..\bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - pdbonly - true - ..\..\bin\Release\ - TRACE - prompt - 4 - ..\..\bin\Release\Plankton.xml - - - - - - - - - - - - - - - - - - - - - + + + + Debug + AnyCPU + 8.0.30703 + 2.0 + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F} + Library + Properties + Plankton + Plankton + v4.5 + 512 + + + + true + full + false + ..\..\..\..\..\AppData\Roaming\Grasshopper\Libraries\ + DEBUG;TRACE + prompt + 4 + false + + + pdbonly + true + ..\..\bin\Release\ + TRACE + prompt + 4 + ..\..\bin\Release\Plankton.xml + false + + + + ..\packages\MathNet.Numerics.3.16.0\lib\net40\MathNet.Numerics.dll + True + + + ..\..\..\..\..\..\..\Program Files\Rhinoceros 5 (64-bit)\System\RhinoCommon.dll + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/Plankton/PlanktonDiagram.cd b/src/Plankton/PlanktonDiagram.cd new file mode 
100644 index 0000000..5ea3098 --- /dev/null +++ b/src/Plankton/PlanktonDiagram.cd @@ -0,0 +1,63 @@ + + + + + + AAABAAAAAAAAAAAAAgBAAAAAAAAAAAAAAAAACAAAAAA= + PlanktonFace.cs + + + + + + AAIABUEAAAAgALAACAAACgQggAAAQAAEQAABAABAAAA= + PlanktonFaceList.cs + + + + + + + AAABAAAAAAAAAAAAAgBAAAAACAAAAAIAAAAACAAAABA= + PlanktonHalfedge.cs + + + + + + AAICAAEAAAAggBAADAAACgQEggAAAAAEwgBBABABEAE= + PlanktonHalfedgeList.cs + + + + + + + AAAAAIAAEAAAAAAAAQAAIAgAAAAAAEAAEAoAAIAAgAA= + PlanktonMesh.cs + + + + + + AAAFAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAACQAAYAE= + PlanktonVertex.cs + + + + + + CAIAAIIAECAgEBAECAAACAQgwAKAAAAFwACFAAAACAE= + PlanktonVertexList.cs + + + + + + + gAAAAAAJAAQAAAAE4AEAAABAAQAAAIAAAAIAAEAAYiE= + PlanktonXYZ.cs + + + + \ No newline at end of file diff --git a/src/Plankton/PlanktonFace.cs b/src/Plankton/PlanktonFace.cs index 0b7ac24..ad5d1e0 100644 --- a/src/Plankton/PlanktonFace.cs +++ b/src/Plankton/PlanktonFace.cs @@ -1,38 +1,40 @@ -using System; - -namespace Plankton -{ - /// - /// Represents a face in Plankton's halfedge mesh data structure. - /// - public class PlanktonFace - { - public int FirstHalfedge; - - public PlanktonFace() - { - this.FirstHalfedge = -1; - } - - internal PlanktonFace(int halfedgeIndex) - { - this.FirstHalfedge = halfedgeIndex; - } - - /// - /// Gets an unset PlanktonFace. Unset faces have -1 for their first halfedge index. - /// - public static PlanktonFace Unset - { - get { return new PlanktonFace() { FirstHalfedge = -1 }; } - } - - /// - /// Whether or not the face is currently being referenced in the mesh. - /// - public bool IsUnused { get { return (this.FirstHalfedge < 0); } } - - [Obsolete()] - public bool Dead { get { return this.IsUnused; } } - } -} +using System; + +namespace Plankton +{ + /// + /// Represents a face in Plankton's halfedge mesh data structure. + /// + public class PlanktonFace + { + public int FirstHalfedge; + public int Index; + + + public PlanktonFace() + { + this.FirstHalfedge = -1; + } + + internal PlanktonFace(int halfedgeIndex) + { + this.FirstHalfedge = halfedgeIndex; + } + + /// + /// Gets an unset PlanktonFace. Unset faces have -1 for their first halfedge index. + /// + public static PlanktonFace Unset + { + get { return new PlanktonFace() { FirstHalfedge = -1 }; } + } + + /// + /// Whether or not the face is currently being referenced in the mesh. + /// + public bool IsUnused { get { return (this.FirstHalfedge < 0); } } + + [Obsolete()] + public bool Dead { get { return this.IsUnused; } } + } +} diff --git a/src/Plankton/PlanktonFaceList.cs b/src/Plankton/PlanktonFaceList.cs index 997d401..3132343 100644 --- a/src/Plankton/PlanktonFaceList.cs +++ b/src/Plankton/PlanktonFaceList.cs @@ -1,621 +1,603 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - -//using Rhino.Geometry; - -namespace Plankton -{ - /// - /// Provides access to the faces and Face related functionality of a Mesh. - /// - public class PlanktonFaceList : IEnumerable - { - private readonly PlanktonMesh _mesh; - private List _list; - - /// - /// Initializes a new instance of the class. - /// Should be called from the mesh constructor. - /// - /// The mesh to which this list of half-edges belongs. - internal PlanktonFaceList(PlanktonMesh owner) - { - this._list = new List(); - this._mesh = owner; - } - - /// - /// Gets the number of faces. - /// - public int Count - { - get - { - return this._list.Count; - } - } - - #region methods - #region face access - /// - /// Adds a new face to the end of the Face list. 
- /// - /// Face to add. - /// The index of the newly added face. - internal int Add(PlanktonFace face) - { - if (face == null) return -1; - this._list.Add(face); - return this.Count - 1; - } - - /// - /// Adds a new face to the end of the Face list. Creates any halfedge pairs that are required. - /// - /// The vertex indices which define the face, ordered anticlockwise. - /// The index of the newly added face (-1 in the case that the face could not be added). - /// The mesh must remain 2-manifold and orientable at all times. - public int AddFace(IEnumerable indices) - { - // This method always ensures that if a vertex lies on a boundary, - // vertex -> outgoingHalfedge -> adjacentFace == -1 - - int[] array = indices.ToArray(); // using Linq for convenience - - var hs = _mesh.Halfedges; - var vs = _mesh.Vertices; - int n = array.Length; - - // Don't allow degenerate faces - if (n < 3) return -1; - - // Check vertices - foreach (int i in array) - { - // Check that all vertex indices exist in this mesh - if (i < 0 || i >= vs.Count) - throw new IndexOutOfRangeException("No vertex exists at this index."); - // Check that all vertices are on a boundary - int outgoing = vs[i].OutgoingHalfedge; - if (outgoing != -1 && hs[outgoing].AdjacentFace != -1) - return -1; - } - - // For each pair of vertices, check for an existing halfedge - // If it exists, check that it doesn't already have a face - // If it doesn't exist, mark for creation of a new halfedge pair - int[] loop = new int[n]; - bool[] is_new = new bool[n]; - - for (int i = 0, ii = 1; i < n; i++, ii++, ii %= n) - { - int v1 = array[i], v2 = array[ii]; - - // Find existing edge, if it exists - int h = hs.FindHalfedge(v1, v2); - if (h < 0) - // No halfedge found, mark for creation - is_new[i] = true; - else if (hs[h].AdjacentFace > -1) - // Existing halfedge already has a face (non-manifold) - return -1; - else - loop[i] = h; - // NOTE: To PREVENT non-manifold vertices, uncomment the line below... - //if(is_new[i] && is_new[(i+n-1)%n] && vs[v1].OutgoingHalfedge > -1) return -1; - } - - // Now create any missing halfedge pairs... - // (This could be done in the loop above but it avoids having to tidy up - // any recently added halfedges should a non-manifold edge be found.) - for (int i = 0, ii = 1; i < n; i++, ii++, ii %= n) - { - if (is_new[i]) // new halfedge pair required - { - int v1 = array[i], v2 = array[ii]; - loop[i] = hs.AddPair(v1, v2, this.Count); - } - else - { - // Link existing halfedge to new face - hs[loop[i]].AdjacentFace = this.Count; - } - } - - // Link halfedges - for (int i = 0, ii = 1; i < n; i++, ii++, ii %= n) - { - //int v1 = array[i]; - int v2 = array[ii]; - int id = 0; - if (is_new[i]) id += 1; // first is new - if (is_new[ii]) id += 2; // second is new - - // Check for non-manifold vertex case, i.e. both current halfedges are new - // but the vertex between them is already part of another face. This vertex - // will have TWO OR MORE outgoing boundary halfedges! (Not strictly allowed, - // but it could happen if faces are added in an UGLY order.) - // TODO: If a mesh has non-manifold vertices perhaps it should be considered - // INVALID. Any operations performed on such a mesh cannot be relied upon to - // perform correctly as the adjacency information may not be correct. - // (More reading: http://www.pointclouds.org/blog/nvcs/) - if (id == 3 && vs[v2].OutgoingHalfedge > -1) id++; // id == 4 - - if (id > 0) // At least one of the halfedge pairs is new... 
- { - // Link outer halfedges - int outer_prev = -1, outer_next = -1; - switch (id) - { - case 1: // first is new, second is old - // iterate through halfedges clockwise around vertex #v2 until boundary - outer_prev = hs[loop[ii]].PrevHalfedge; - outer_next = hs.GetPairHalfedge(loop[i]); - break; - case 2: // second is new, first is old - outer_prev = hs.GetPairHalfedge(loop[ii]); - outer_next = hs[loop[i]].NextHalfedge; - break; - case 3: // both are new - outer_prev = hs.GetPairHalfedge(loop[ii]); - outer_next = hs.GetPairHalfedge(loop[i]); - break; - case 4: // both are new (non-manifold vertex) - // We have TWO boundaries to take care of here: first... - outer_prev = hs[vs[v2].OutgoingHalfedge].PrevHalfedge; - outer_next = hs.GetPairHalfedge(loop[i]); - hs[outer_prev].NextHalfedge = outer_next; - hs[outer_next].PrevHalfedge = outer_prev; - // and second... - outer_prev = hs.GetPairHalfedge(loop[ii]); - outer_next = vs[v2].OutgoingHalfedge; - break; - } - // outer_{prev,next} should now be set, so store links in HDS - if (outer_prev > -1 && outer_next > -1) - { - hs[outer_prev].NextHalfedge = outer_next; - hs[outer_next].PrevHalfedge = outer_prev; - } - - // Link inner halfedges - hs[loop[i]].NextHalfedge = loop[ii]; - hs[loop[ii]].PrevHalfedge = loop[i]; - - // ensure vertex->outgoing is boundary if vertex is boundary - if (is_new[i]) // first is new - { - vs[v2].OutgoingHalfedge = loop[i] + 1; - } - } - else // both old (non-manifold vertex trickery below) - { - // In the case that v2 links to the current second halfedge, creating a - // face here will redefine v2 as a non-boundary vertex. Do a quick lap of - // v2's other outgoing halfedges in case one of them is still a boundary - // (as will be the case if v2 was non-manifold). - if (vs[v2].OutgoingHalfedge == loop[ii]) - { - foreach (int h in hs.GetVertexCirculator(loop[ii]).Skip(1)) - { - if (hs[h].AdjacentFace < 0) - { - vs[v2].OutgoingHalfedge = h; - break; - } - } - } - // If inner loop exists, but for some reason it's not already linked - // (non-manifold vertex) make loop[i] adjacent to loop[ii]. Tidy up other - // halfedge links such that all outgoing halfedges remain visible to v2. - if (hs[loop[i]].NextHalfedge != loop[ii] || hs[loop[ii]].PrevHalfedge != loop[i]) - { - int next = hs[loop[i]].NextHalfedge; - int prev = hs[loop[ii]].PrevHalfedge; - // Find another boundary at this vertex to link 'next' and 'prev' into. - try - { - int boundary = hs.GetVertexCirculator(loop[ii]).Skip(1) - .First(h => hs[h].AdjacentFace < 0); - hs.MakeConsecutive(loop[i], loop[ii]); - hs.MakeConsecutive(hs[boundary].PrevHalfedge, next); - hs.MakeConsecutive(prev, boundary); - } - // If no other boundary is found, something must be wrong... - catch (InvalidOperationException) - { - throw new InvalidOperationException(string.Format( - "Failed to relink halfedges around vertex #{0} during creation of face #{1}", v2, this.Count)); - } - } - } - } - - // Finally, add the face and return its index - PlanktonFace f = new PlanktonFace() { FirstHalfedge = loop[0] }; - - return this.Add(f); - } - - /// - /// Appends a new triangular face to the end of the mesh face list. Creates any halfedge pairs that are required. - /// - /// The index of the newly added face (-1 in the case that the face could not be added). - /// Index of first corner. - /// Index of second corner. - /// Index of third corner. - /// The mesh must remain 2-manifold and orientable at all times. 
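// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the patch above): building a small
// mesh with the AddFace overloads described here. Vertex indices are passed
// anticlockwise; AddFace returns -1 if the face would make the mesh
// non-manifold or non-orientable. Assumes PlanktonMesh has a parameterless
// constructor and that the vertex list exposes an Add(x, y, z) overload;
// adjust to the actual vertex API if that assumption is wrong.
using System;
using Plankton;

static class AddFaceSketch
{
    static void Main()
    {
        var mesh = new PlanktonMesh();

        // Four corners of a unit square plus one extra vertex to the right.
        int a = mesh.Vertices.Add(0, 0, 0);
        int b = mesh.Vertices.Add(1, 0, 0);
        int c = mesh.Vertices.Add(1, 1, 0);
        int d = mesh.Vertices.Add(0, 1, 0);
        int e = mesh.Vertices.Add(2, 0, 0);

        int quad = mesh.Faces.AddFace(a, b, c, d);   // quad face, anticlockwise
        int tri  = mesh.Faces.AddFace(b, e, c);      // triangle sharing edge b-c

        // Re-adding a face on the same side of the shared edge fails:
        // the existing halfedge b->c already has an adjacent face.
        int bad = mesh.Faces.AddFace(b, c, e);       // expected: -1

        Console.WriteLine("quad={0} tri={1} bad={2}", quad, tri, bad);
    }
}
// ---------------------------------------------------------------------------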
- public int AddFace(int a, int b, int c) - { - return this.AddFace(new int[] { a, b, c }); - } - - /// - /// Appends a new quadragular face to the end of the mesh face list. Creates any halfedge pairs that are required. - /// - /// The index of the newly added face (-1 in the case that the face could not be added). - /// Index of first corner. - /// Index of second corner. - /// Index of third corner. - /// Index of fourth corner. - /// The mesh must remain 2-manifold and orientable at all times. - public int AddFace(int a, int b, int c, int d) - { - return this.AddFace(new int[] { a, b, c, d }); - } - - /// - /// Appends a list of faces to the end of the mesh face list. - /// - /// Faces to add. - /// Indices of the newly created faces. - public int[] AddFaces(IEnumerable> faces) - { - return faces.Select(f => this.AddFace(f)).ToArray(); - } - - /// - /// Removes a face from the mesh without affecting the remaining geometry. - /// Ensures that the topology of the halfedge mesh remains fully intact. - /// - /// The index of the face to be removed. - public void RemoveFace(int index) - { - int[] fhs = this.GetHalfedges(index); - foreach (int h in fhs) - { - if (_mesh.Halfedges.IsBoundary(h)) - { - // If halfedge is on a boundary then remove the pair - _mesh.Halfedges.RemovePairHelper(h); - } - else - { - // If halfedge was not previously a boundary, it is now - var heObj = _mesh.Halfedges[h]; - heObj.AdjacentFace = -1; - _mesh.Vertices[heObj.StartVertex].OutgoingHalfedge = h; - } - } - this[index] = PlanktonFace.Unset; - } - - /// - /// Returns the face at the given index. - /// - /// - /// Index of face to get. - /// Must be larger than or equal to zero and smaller than the Face Count of the mesh. - /// - /// The face at the given index. - public PlanktonFace this[int index] - { - get - { - return this._list[index]; - } - internal set - { - this._list[index] = value; - } - } - #endregion - - /// - /// Helper method to remove dead faces from the list, re-index and compact. - /// - internal void CompactHelper() - { - int marker = 0; // Location where the current face should be moved to - - // Run through all the faces - for (int iter = 0; iter < _list.Count; iter++) - { - // If face is alive, check if we need to shuffle it down the list - if (!_list[iter].IsUnused) - { - if (marker < iter) - { - // Room to shuffle. Copy current face to marked slot. - _list[marker] = _list[iter]; - - // Update all halfedges which are adjacent - int first = _list[marker].FirstHalfedge; - foreach (int h in _mesh.Halfedges.GetFaceCirculator(first)) - { - _mesh.Halfedges[h].AdjacentFace = marker; - } - } - marker++; // That spot's filled. Advance the marker. - } - } - - // Trim list down to new size - if (marker < _list.Count) { _list.RemoveRange(marker, _list.Count - marker); } - } - - #region traversals - /// - /// Traverses the halfedge indices which bound a face. - /// - /// A face index. - /// An enumerable of halfedge indices incident to the specified face. - /// Ordered anticlockwise around the face. 
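// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the pattern the deprecation
// message above points to. Instead of Faces.GetHalfedgesCirculator(f), start
// from the face's FirstHalfedge and walk Halfedges.GetFaceCirculator, which
// yields the bounding halfedges anticlockwise. Assumes `mesh` is an existing
// PlanktonMesh and `f` a valid face index.
using System;
using System.Linq;
using Plankton;

static class FaceCirculatorSketch
{
    static void PrintFaceLoop(PlanktonMesh mesh, int f)
    {
        int first = mesh.Faces[f].FirstHalfedge;

        // Halfedges around the face, anticlockwise, starting at `first`.
        foreach (int h in mesh.Halfedges.GetFaceCirculator(first))
        {
            int v0 = mesh.Halfedges[h].StartVertex;
            int v1 = mesh.Halfedges.EndVertex(h);
            Console.WriteLine("halfedge {0}: {1} -> {2}", h, v0, v1);
        }

        // The same loop as a vertex array (equivalent to Faces.GetFaceVertices).
        int[] loop = mesh.Halfedges.GetFaceCirculator(first)
                         .Select(h => mesh.Halfedges[h].StartVertex)
                         .ToArray();
        Console.WriteLine("face {0}: [{1}]", f, string.Join(", ", loop));
    }
}
// ---------------------------------------------------------------------------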
- [Obsolete("GetHalfedgesCirculator(int) is deprecated, please use" + - "Halfedges.GetFaceCirculator(int) instead.")] - public IEnumerable GetHalfedgesCirculator(int f) - { - int he_first = this[f].FirstHalfedge; - if (he_first < 0) yield break; // face has no connectivity, exit - int he_current = he_first; - do - { - yield return he_current; - he_current = _mesh.Halfedges[he_current].NextHalfedge; - } - while (he_current != he_first); - } - #endregion - - #region adjacency queries - /// - /// Gets the halfedges which bound a face. - /// - /// A face index. - /// The indices of halfedges incident to a particular face. - /// Ordered anticlockwise around the face. - public int[] GetHalfedges(int f) - { - return _mesh.Halfedges.GetFaceCirculator(this[f].FirstHalfedge).ToArray(); - } - - /// - /// Gets vertex indices of a face. - /// - /// A face index. - /// An array of vertex indices incident to the specified face. - /// Ordered anticlockwise around the face. - public int[] GetFaceVertices(int f) - { - return _mesh.Halfedges.GetFaceCirculator(this[f].FirstHalfedge) - .Select(h => _mesh.Halfedges[h].StartVertex).ToArray(); - } - - [Obsolete("GetVertices is deprecated, please use GetFaceVertices instead.")] - public int[] GetVertices(int f) - { - return this.GetFaceVertices(f); - } - #endregion - - #region Euler operators - /// - /// Split a face into two faces by inserting a new edge - /// - /// - /// The index of a second halfedge adjacent to the face to split. - /// The new edge will end at the start of this halfedge. - /// The index of a halfedge adjacent to the face to split. - /// The new edge will begin at the start of this halfedge. - /// The index of one of the newly created halfedges, or -1 on failure. - /// The returned halfedge will be adjacent to the pre-existing face. - public int SplitFace(int to, int from) - { - // split the adjacent face in 2 - // by creating a new edge from the start of the given halfedge - // to another vertex around the face - - var hs = _mesh.Halfedges; - - // check preconditions - int existing_face = hs[from].AdjacentFace; - if (existing_face == -1 || existing_face != hs[to].AdjacentFace) { return -1; } - if (from == to || hs[from].NextHalfedge == to || hs[to].NextHalfedge == from) { return -1; } - - // add the new halfedge pair - int new_halfedge1 = hs.AddPair(hs[from].StartVertex, hs[to].StartVertex, existing_face); - int new_halfedge2 = hs.GetPairHalfedge(new_halfedge1); - - // add a new face - //PlanktonFace new_face = new PlanktonFace(); - int new_face_index = this.Add(PlanktonFace.Unset); - - //link everything up - - //prev of input he becomes prev of new_he1 - hs.MakeConsecutive(hs[from].PrevHalfedge, new_halfedge1); - - //prev of he_around becomes prev of new_he2 - hs.MakeConsecutive(hs[to].PrevHalfedge, new_halfedge2); - - //next of new_he1 becomes he_around - hs.MakeConsecutive(new_halfedge1, to); - - //next of new_he2 becomes index - hs.MakeConsecutive(new_halfedge2, from); - - //set the original face's first halfedge to new_he1 - this[existing_face].FirstHalfedge = new_halfedge1; - //set the new face's first halfedge to new_he2 - this[new_face_index].FirstHalfedge = new_halfedge2; - - //set adjface of new face loop - foreach (int h in _mesh.Halfedges.GetFaceCirculator(new_halfedge2)) - { - hs[h].AdjacentFace = new_face_index; - } - - //think thats all of it! - - return new_halfedge1; - } - - /// - /// Merges the two faces incident to the specified halfedge pair. - /// - /// - /// The index of a halfedge inbetween the two faces to merge. 
- /// The face adjacent to this halfedge will be retained. - /// The successor of around the face, or -1 on failure. - /// - /// The invariant mesh.Faces.MergeFaces(mesh.Faces.SplitFace(a, b)) will return a, - /// leaving the mesh unchanged. - public int MergeFaces(int index) - { - var hs = _mesh.Halfedges; - int pair = hs.GetPairHalfedge(index); - int face = hs[index].AdjacentFace; - int pair_face = hs[pair].AdjacentFace; - - // Check for a face on both sides - if (face == -1 || pair_face == -1) { return -1; } - - // Both vertices incident to given halfedge must have valence > 2 - if (3 > _mesh.Vertices.GetHalfedges(hs[index].StartVertex).Length) { return -1; } - if (3 > _mesh.Vertices.GetHalfedges(hs[pair].StartVertex).Length) { return -1; } - - // Make combined face halfedges consecutive - int index_prev = hs[index].PrevHalfedge; - int index_next = hs[index].NextHalfedge; - - // Remove halfedges (handles re-linking at ends and re-assigning vertices' outgoing hes) - hs.RemovePairHelper(index); - - // Update retained face's first halfedge, if necessary - if (this[face].FirstHalfedge == index) - this[face].FirstHalfedge = index_next; - - // Go around the dead face, reassigning adjacency - foreach (int h in hs.GetFaceCirculator(index_next)) - { - hs[h].AdjacentFace = face; - } - - // Keep the adjacent face, but remove the pair's adjacent face - this[pair_face] = PlanktonFace.Unset; - - return index_next; - } - - /// - /// Divides an n-sided face into n triangles, adding a new vertex in the center of the face. - /// - /// The index of the face to stellate - /// The index of the central vertex - public int Stellate(int f) - { - int central_vertex = _mesh.Vertices.Add(this.GetFaceCenter(f)); - int CountBefore = _mesh.Halfedges.Count(); - int[] FaceHalfEdges = this.GetHalfedges(f); - for (int i = 0; i < FaceHalfEdges.Length; i++) - { - int ThisHalfEdge = FaceHalfEdges[i]; - int TriangleFace; - if (i == 0) {TriangleFace = f;} - else {TriangleFace = this.Add(PlanktonFace.Unset);} - this[TriangleFace].FirstHalfedge = ThisHalfEdge; - _mesh.Halfedges[ThisHalfEdge].AdjacentFace = TriangleFace; - int OutSpoke = _mesh.Halfedges.AddPair(central_vertex, _mesh.Halfedges[ThisHalfEdge].StartVertex, TriangleFace); - if (i == 0) { _mesh.Vertices[central_vertex].OutgoingHalfedge = OutSpoke; } - _mesh.Halfedges.MakeConsecutive(OutSpoke,ThisHalfEdge); - } - for (int i = 0; i < FaceHalfEdges.Length; i++) - { - int ThisHalfEdge = FaceHalfEdges[i]; - if(i - /// Gets the barycenter of a face's vertices. - /// - /// A face index. - /// The location of the specified face's barycenter. - public PlanktonXYZ GetFaceCenter(int f) - { - PlanktonXYZ centroid = PlanktonXYZ.Zero; - int count = 0; - foreach (int i in this.GetFaceVertices(f)) - { - centroid += _mesh.Vertices[i].ToXYZ(); - count++; - } - centroid *= 1f / count; - return centroid; - } - - [Obsolete("FaceCentroid is deprecated, please use GetFaceCenter instead.")] - public PlanktonXYZ FaceCentroid(int f) - { - return this.GetFaceCenter(f); - } - - /// - /// Gets the number of naked edges which bound this face. - /// - /// A face index. - /// The number of halfedges for which the opposite halfedge has no face (i.e. adjacent face index is -1). 
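// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): GetFaceCenter is a plain
// barycenter, i.e. the average of the face's vertex positions, and
// NakedEdgeCount counts bounding halfedges whose pair has no adjacent face.
// Assumes PlanktonXYZ exposes X/Y/Z components; if it does not, drop the
// first Console line and keep only the index arithmetic.
using System;
using Plankton;

static class FaceQueriesSketch
{
    static void Describe(PlanktonMesh mesh, int f)
    {
        // Barycenter, as computed by PlanktonFaceList.GetFaceCenter:
        // centroid = (1/n) * sum of vertex positions.
        PlanktonXYZ center = mesh.Faces.GetFaceCenter(f);
        Console.WriteLine("face {0} center: ({1}, {2}, {3})",
                          f, center.X, center.Y, center.Z);

        // Naked edges: bounding halfedges whose opposite halfedge is on the
        // boundary (adjacent face index of the pair is -1).
        int naked = mesh.Faces.NakedEdgeCount(f);
        int total = mesh.Faces.GetHalfedges(f).Length;
        Console.WriteLine("face {0}: {1} of {2} edges are naked", f, naked, total);
    }
}
// ---------------------------------------------------------------------------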
- public int NakedEdgeCount(int f) - { - int nakedCount = 0; - foreach (int i in _mesh.Halfedges.GetFaceCirculator(this[f].FirstHalfedge)) - { - if (_mesh.Halfedges[_mesh.Halfedges.GetPairHalfedge(i)].AdjacentFace == -1) nakedCount++; - } - return nakedCount; - } - #endregion - - #region IEnumerable implementation - /// - /// Gets an enumerator that yields all faces in this collection. - /// - /// An enumerator. - public IEnumerator GetEnumerator() - { - return this._list.GetEnumerator(); - } - IEnumerator IEnumerable.GetEnumerator() - { - return this.GetEnumerator(); - } - #endregion - } -} +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +//using Rhino.Geometry; + +namespace Plankton +{ + /// + /// Provides access to the faces and Face related functionality of a Mesh. + /// + public class PlanktonFaceList : IEnumerable + { + private readonly PlanktonMesh _mesh; + private List _list; + + /// + /// Initializes a new instance of the class. + /// Should be called from the mesh constructor. + /// + /// The mesh to which this list of half-edges belongs. + internal PlanktonFaceList(PlanktonMesh owner) + { + this._list = new List(); + this._mesh = owner; + } + + /// + /// Gets the number of faces. + /// + public int Count + { + get + { + return this._list.Count; + } + } + + #region methods + #region face access + /// + /// Adds a new face to the end of the Face list. + /// + /// Face to add. + /// The index of the newly added face. + internal int Add(PlanktonFace face) + { + if (face == null) return -1; + this._list.Add(face); + return this.Count - 1; + } + + /// + /// Adds a new face to the end of the Face list. Creates any halfedge pairs that are required. + /// + /// The vertex indices which define the face, ordered anticlockwise. + /// The index of the newly added face (-1 in the case that the face could not be added). + /// The mesh must remain 2-manifold and orientable at all times. + public int AddFace(IEnumerable indices) + { + // This method always ensures that if a vertex lies on a boundary, + // vertex -> outgoingHalfedge -> adjacentFace == -1 + + int[] array = indices.ToArray(); // using Linq for convenience + + var hs = _mesh.Halfedges; + var vs = _mesh.Vertices; + int n = array.Length; + + // Don't allow degenerate faces + if (n < 3) return -1; + + // Check vertices + foreach (int i in array) + { + // Check that all vertex indices exist in this mesh + if (i < 0 || i >= vs.Count) + throw new IndexOutOfRangeException("No vertex exists at this index."); + // Check that all vertices are on a boundary + int outgoing = vs[i].OutgoingHalfedge; + if (outgoing != -1 && hs[outgoing].AdjacentFace != -1) + return -1; + } + + // For each pair of vertices, check for an existing halfedge + // If it exists, check that it doesn't already have a face + // If it doesn't exist, mark for creation of a new halfedge pair + int[] loop = new int[n]; + bool[] is_new = new bool[n]; + + for (int i = 0, ii = 1; i < n; i++, ii++, ii %= n) + { + int v1 = array[i], v2 = array[ii]; + + // Find existing edge, if it exists + int h = hs.FindHalfedge(v1, v2); + if (h < 0) + // No halfedge found, mark for creation + is_new[i] = true; + else if (hs[h].AdjacentFace > -1) + // Existing halfedge already has a face (non-manifold) + return -1; + else + loop[i] = h; + // NOTE: To PREVENT non-manifold vertices, uncomment the line below... + //if(is_new[i] && is_new[(i+n-1)%n] && vs[v1].OutgoingHalfedge > -1) return -1; + } + + // Now create any missing halfedge pairs... 
+ // (This could be done in the loop above but it avoids having to tidy up + // any recently added halfedges should a non-manifold edge be found.) + for (int i = 0, ii = 1; i < n; i++, ii++, ii %= n) + { + if (is_new[i]) // new halfedge pair required + { + int v1 = array[i], v2 = array[ii]; + loop[i] = hs.AddPair(v1, v2, this.Count); + } + else + { + // Link existing halfedge to new face + hs[loop[i]].AdjacentFace = this.Count; + } + } + + // Link halfedges + for (int i = 0, ii = 1; i < n; i++, ii++, ii %= n) + { + //int v1 = array[i]; + int v2 = array[ii]; + int id = 0; + if (is_new[i]) id += 1; // first is new + if (is_new[ii]) id += 2; // second is new + + // Check for non-manifold vertex case, i.e. both current halfedges are new + // but the vertex between them is already part of another face. This vertex + // will have TWO OR MORE outgoing boundary halfedges! (Not strictly allowed, + // but it could happen if faces are added in an UGLY order.) + // TODO: If a mesh has non-manifold vertices perhaps it should be considered + // INVALID. Any operations performed on such a mesh cannot be relied upon to + // perform correctly as the adjacency information may not be correct. + // (More reading: http://www.pointclouds.org/blog/nvcs/) + if (id == 3 && vs[v2].OutgoingHalfedge > -1) id++; // id == 4 + + if (id > 0) // At least one of the halfedge pairs is new... + { + // Link outer halfedges + int outer_prev = -1, outer_next = -1; + switch (id) + { + case 1: // first is new, second is old + // iterate through halfedges clockwise around vertex #v2 until boundary + outer_prev = hs[loop[ii]].PrevHalfedge; + outer_next = hs.GetPairHalfedge(loop[i]); + break; + case 2: // second is new, first is old + outer_prev = hs.GetPairHalfedge(loop[ii]); + outer_next = hs[loop[i]].NextHalfedge; + break; + case 3: // both are new + outer_prev = hs.GetPairHalfedge(loop[ii]); + outer_next = hs.GetPairHalfedge(loop[i]); + break; + case 4: // both are new (non-manifold vertex) + // We have TWO boundaries to take care of here: first... + outer_prev = hs[vs[v2].OutgoingHalfedge].PrevHalfedge; + outer_next = hs.GetPairHalfedge(loop[i]); + hs[outer_prev].NextHalfedge = outer_next; + hs[outer_next].PrevHalfedge = outer_prev; + // and second... + outer_prev = hs.GetPairHalfedge(loop[ii]); + outer_next = vs[v2].OutgoingHalfedge; + break; + } + // outer_{prev,next} should now be set, so store links in HDS + if (outer_prev > -1 && outer_next > -1) + { + hs[outer_prev].NextHalfedge = outer_next; + hs[outer_next].PrevHalfedge = outer_prev; + } + + // Link inner halfedges + hs[loop[i]].NextHalfedge = loop[ii]; + hs[loop[ii]].PrevHalfedge = loop[i]; + + // ensure vertex->outgoing is boundary if vertex is boundary + if (is_new[i]) // first is new + { + vs[v2].OutgoingHalfedge = loop[i] + 1; + } + } + else // both old (non-manifold vertex trickery below) + { + // In the case that v2 links to the current second halfedge, creating a + // face here will redefine v2 as a non-boundary vertex. Do a quick lap of + // v2's other outgoing halfedges in case one of them is still a boundary + // (as will be the case if v2 was non-manifold). + if (vs[v2].OutgoingHalfedge == loop[ii]) + { + foreach (int h in hs.GetVertexCirculator(loop[ii]).Skip(1)) + { + if (hs[h].AdjacentFace < 0) + { + vs[v2].OutgoingHalfedge = h; + break; + } + } + } + // If inner loop exists, but for some reason it's not already linked + // (non-manifold vertex) make loop[i] adjacent to loop[ii]. 
Tidy up other + // halfedge links such that all outgoing halfedges remain visible to v2. + if (hs[loop[i]].NextHalfedge != loop[ii] || hs[loop[ii]].PrevHalfedge != loop[i]) + { + int next = hs[loop[i]].NextHalfedge; + int prev = hs[loop[ii]].PrevHalfedge; + // Find another boundary at this vertex to link 'next' and 'prev' into. + try + { + int boundary = hs.GetVertexCirculator(loop[ii]).Skip(1) + .First(h => hs[h].AdjacentFace < 0); + hs.MakeConsecutive(loop[i], loop[ii]); + hs.MakeConsecutive(hs[boundary].PrevHalfedge, next); + hs.MakeConsecutive(prev, boundary); + } + // If no other boundary is found, something must be wrong... + catch (InvalidOperationException) + { + throw new InvalidOperationException(string.Format( + "Failed to relink halfedges around vertex #{0} during creation of face #{1}", v2, this.Count)); + } + } + } + } + + // Finally, add the face and return its index + PlanktonFace f = new PlanktonFace() { FirstHalfedge = loop[0] }; + + return this.Add(f); + } + + /// + /// Appends a new triangular face to the end of the mesh face list. Creates any halfedge pairs that are required. + /// + /// The index of the newly added face (-1 in the case that the face could not be added). + /// Index of first corner. + /// Index of second corner. + /// Index of third corner. + /// The mesh must remain 2-manifold and orientable at all times. + public int AddFace(int a, int b, int c) + { + return this.AddFace(new int[] { a, b, c }); + } + + /// + /// Appends a new quadragular face to the end of the mesh face list. Creates any halfedge pairs that are required. + /// + /// The index of the newly added face (-1 in the case that the face could not be added). + /// Index of first corner. + /// Index of second corner. + /// Index of third corner. + /// Index of fourth corner. + /// The mesh must remain 2-manifold and orientable at all times. + public int AddFace(int a, int b, int c, int d) + { + return this.AddFace(new int[] { a, b, c, d }); + } + + /// + /// Appends a list of faces to the end of the mesh face list. + /// + /// Faces to add. + /// Indices of the newly created faces. + public int[] AddFaces(IEnumerable> faces) + { + return faces.Select(f => this.AddFace(f)).ToArray(); + } + + /// + /// Removes a face from the mesh without affecting the remaining geometry. + /// Ensures that the topology of the halfedge mesh remains fully intact. + /// + /// The index of the face to be removed. + public void RemoveFace(int index) + { + int[] fhs = this.GetHalfedges(index); + foreach (int h in fhs) + { + if (_mesh.Halfedges.IsBoundary(h)) + { + // If halfedge is on a boundary then remove the pair + _mesh.Halfedges.RemovePairHelper(h); + } + else + { + // If halfedge was not previously a boundary, it is now + var heObj = _mesh.Halfedges[h]; + heObj.AdjacentFace = -1; + _mesh.Vertices[heObj.StartVertex].OutgoingHalfedge = h; + } + } + this[index] = PlanktonFace.Unset; + } + + /// + /// Returns the face at the given index. + /// + /// + /// Index of face to get. + /// Must be larger than or equal to zero and smaller than the Face Count of the mesh. + /// + /// The face at the given index. + public PlanktonFace this[int index] + { + get + { + return this._list[index]; + } + internal set + { + this._list[index] = value; + } + } + #endregion + + /// + /// Helper method to remove dead faces from the list, re-index and compact. 
+ /// + internal void CompactHelper() + { + int marker = 0; // Location where the current face should be moved to + + // Run through all the faces + for (int iter = 0; iter < _list.Count; iter++) + { + // If face is alive, check if we need to shuffle it down the list + if (!_list[iter].IsUnused) + { + if (marker < iter) + { + // Room to shuffle. Copy current face to marked slot. + _list[marker] = _list[iter]; + + // Update all halfedges which are adjacent + int first = _list[marker].FirstHalfedge; + foreach (int h in _mesh.Halfedges.GetFaceCirculator(first)) + { + _mesh.Halfedges[h].AdjacentFace = marker; + } + } + marker++; // That spot's filled. Advance the marker. + } + } + + // Trim list down to new size + if (marker < _list.Count) { _list.RemoveRange(marker, _list.Count - marker); } + } + + #region adjacency queries + /// + /// Gets the halfedges which bound a face. + /// + /// A face index. + /// The indices of halfedges incident to a particular face. + /// Ordered anticlockwise around the face. + public int[] GetHalfedges(int f) + { + return _mesh.Halfedges.GetFaceCirculator(this[f].FirstHalfedge).ToArray(); + } + + /// + /// Gets vertex indices of a face. + /// + /// A face index. + /// An array of vertex indices incident to the specified face. + /// Ordered anticlockwise around the face. + public int[] GetFaceVertices(int f) + { + return _mesh.Halfedges.GetFaceCirculator(this[f].FirstHalfedge) + .Select(h => _mesh.Halfedges[h].StartVertex).ToArray(); + } + + #endregion + + #region Euler operators + /// + /// Split a face into two faces by inserting a new edge + /// + /// + /// The index of a second halfedge adjacent to the face to split. + /// The new edge will end at the start of this halfedge. + /// The index of a halfedge adjacent to the face to split. + /// The new edge will begin at the start of this halfedge. + /// The index of one of the newly created halfedges, or -1 on failure. + /// The returned halfedge will be adjacent to the pre-existing face. 
+ public int SplitFace(int to, int from) + { + // split the adjacent face in 2 + // by creating a new edge from the start of the given halfedge + // to another vertex around the face + + var hs = _mesh.Halfedges; + + // check preconditions + int existing_face = hs[from].AdjacentFace; + if (existing_face == -1 || existing_face != hs[to].AdjacentFace) { return -1; } + if (from == to || hs[from].NextHalfedge == to || hs[to].NextHalfedge == from) { return -1; } + + // add the new halfedge pair + int new_halfedge1 = hs.AddPair(hs[from].StartVertex, hs[to].StartVertex, existing_face); + int new_halfedge2 = hs.GetPairHalfedge(new_halfedge1); + + // add a new face + //PlanktonFace new_face = new PlanktonFace(); + int new_face_index = this.Add(PlanktonFace.Unset); + + //link everything up + + //prev of input he becomes prev of new_he1 + hs.MakeConsecutive(hs[from].PrevHalfedge, new_halfedge1); + + //prev of he_around becomes prev of new_he2 + hs.MakeConsecutive(hs[to].PrevHalfedge, new_halfedge2); + + //next of new_he1 becomes he_around + hs.MakeConsecutive(new_halfedge1, to); + + //next of new_he2 becomes index + hs.MakeConsecutive(new_halfedge2, from); + + //set the original face's first halfedge to new_he1 + this[existing_face].FirstHalfedge = new_halfedge1; + //set the new face's first halfedge to new_he2 + this[new_face_index].FirstHalfedge = new_halfedge2; + + //set adjface of new face loop + foreach (int h in _mesh.Halfedges.GetFaceCirculator(new_halfedge2)) + { + hs[h].AdjacentFace = new_face_index; + } + + //think thats all of it! + + return new_halfedge1; + } + + /// + /// Merges the two faces incident to the specified halfedge pair. + /// + /// + /// The index of a halfedge inbetween the two faces to merge. + /// The face adjacent to this halfedge will be retained. + /// The successor of around the face, or -1 on failure. + /// + /// The invariant mesh.Faces.MergeFaces(mesh.Faces.SplitFace(a, b)) will return a, + /// leaving the mesh unchanged. + public int MergeFaces(int index) + { + var hs = _mesh.Halfedges; + int pair = hs.GetPairHalfedge(index); + int face = hs[index].AdjacentFace; + int pair_face = hs[pair].AdjacentFace; + + // Check for a face on both sides + if (face == -1 || pair_face == -1) { return -1; } + + // Both vertices incident to given halfedge must have valence > 2 + if (3 > _mesh.Vertices.GetHalfedges(hs[index].StartVertex).Length) { return -1; } + if (3 > _mesh.Vertices.GetHalfedges(hs[pair].StartVertex).Length) { return -1; } + + // Make combined face halfedges consecutive + int index_prev = hs[index].PrevHalfedge; + int index_next = hs[index].NextHalfedge; + + // Remove halfedges (handles re-linking at ends and re-assigning vertices' outgoing hes) + hs.RemovePairHelper(index); + + // Update retained face's first halfedge, if necessary + if (this[face].FirstHalfedge == index) + this[face].FirstHalfedge = index_next; + + // Go around the dead face, reassigning adjacency + foreach (int h in hs.GetFaceCirculator(index_next)) + { + hs[h].AdjacentFace = face; + } + + // Keep the adjacent face, but remove the pair's adjacent face + this[pair_face] = PlanktonFace.Unset; + + return index_next; + } + + /// + /// Divides an n-sided face into n triangles, adding a new vertex in the center of the face. 
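// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the SplitFace / MergeFaces
// round trip described in the remarks above. SplitFace(to, from) inserts a
// new edge running from the start of `from` to the start of `to` and returns
// a new halfedge adjacent to the original face; MergeFaces of that halfedge
// undoes the split and, per the documented invariant, returns the `to`
// argument. Assumes `mesh` is an existing PlanktonMesh and `to`/`from` are
// two non-consecutive halfedges bounding the same face.
using System;
using Plankton;

static class SplitMergeSketch
{
    static void RoundTrip(PlanktonMesh mesh, int to, int from)
    {
        int facesBefore = mesh.Faces.Count;

        int newHalfedge = mesh.Faces.SplitFace(to, from);
        if (newHalfedge < 0)
        {
            Console.WriteLine("split rejected (boundary or consecutive halfedges)");
            return;
        }
        Console.WriteLine("faces: {0} -> {1}", facesBefore, mesh.Faces.Count);

        // Undo: merge across the edge we just created.
        // (The merged-away face is only marked unused; it disappears from the
        // list when the mesh is compacted.)
        int successor = mesh.Faces.MergeFaces(newHalfedge);
        Console.WriteLine("MergeFaces returned {0} (expected {1})", successor, to);
    }
}
// ---------------------------------------------------------------------------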
+ /// + /// The index of the face to stellate + /// The index of the central vertex + public int Stellate(int f) + { + int central_vertex = _mesh.Vertices.Add(this.GetFaceCenter(f)); + int CountBefore = _mesh.Halfedges.Count(); + int[] FaceHalfEdges = this.GetHalfedges(f); + for (int i = 0; i < FaceHalfEdges.Length; i++) + { + int ThisHalfEdge = FaceHalfEdges[i]; + int TriangleFace; + if (i == 0) {TriangleFace = f;} + else {TriangleFace = this.Add(PlanktonFace.Unset);} + this[TriangleFace].FirstHalfedge = ThisHalfEdge; + _mesh.Halfedges[ThisHalfEdge].AdjacentFace = TriangleFace; + int OutSpoke = _mesh.Halfedges.AddPair(central_vertex, _mesh.Halfedges[ThisHalfEdge].StartVertex, TriangleFace); + if (i == 0) { _mesh.Vertices[central_vertex].OutgoingHalfedge = OutSpoke; } + _mesh.Halfedges.MakeConsecutive(OutSpoke,ThisHalfEdge); + } + for (int i = 0; i < FaceHalfEdges.Length; i++) + { + int ThisHalfEdge = FaceHalfEdges[i]; + if(i + /// Gets the barycenter of a face's vertices. + /// + /// A face index. + /// The location of the specified face's barycenter. + public PlanktonXYZ GetFaceCenter(int f) + { + PlanktonXYZ centroid = PlanktonXYZ.Zero; + int count = 0; + foreach (int i in this.GetFaceVertices(f)) + { + centroid += _mesh.Vertices[i].ToXYZ(); + count++; + } + centroid *= 1f / count; + return centroid; + } + + // !!! + /// + /// Gets the number of naked edges which bound this face. + /// + /// A face index. + /// The number of halfedges for which the opposite halfedge has no face (i.e. adjacent face index is -1). + public int NakedEdgeCount(int f) + { + int nakedCount = 0; + foreach (int i in _mesh.Halfedges.GetFaceCirculator(this[f].FirstHalfedge)) + { + if (_mesh.Halfedges[_mesh.Halfedges.GetPairHalfedge(i)].AdjacentFace == -1) nakedCount++; + } + return nakedCount; + } + + #endregion + + + + + #region IEnumerable implementation + /// + /// Gets an enumerator that yields all faces in this collection. + /// + /// An enumerator. + public IEnumerator GetEnumerator() + { + return this._list.GetEnumerator(); + } + IEnumerator IEnumerable.GetEnumerator() + { + return this.GetEnumerator(); + } + #endregion + + #region by dyliu + public void AssignFaceIndex() + { + for (int i = 0; i < this.Count(); i++) + { + this[i].Index = i; + } + } + + #endregion + } +} diff --git a/src/Plankton/PlanktonHalfedge.cs b/src/Plankton/PlanktonHalfedge.cs index 0f71aef..17dd73b 100644 --- a/src/Plankton/PlanktonHalfedge.cs +++ b/src/Plankton/PlanktonHalfedge.cs @@ -1,56 +1,87 @@ -using System; - -namespace Plankton -{ - /// - /// Represents a halfedge in Plankton's halfedge mesh data structure. - /// - public class PlanktonHalfedge - { - public int StartVertex; - public int AdjacentFace; - public int NextHalfedge; - public int PrevHalfedge; - - internal PlanktonHalfedge() - { - StartVertex = -1; - AdjacentFace = -1; - NextHalfedge = -1; - PrevHalfedge = -1; - } - - internal PlanktonHalfedge(int Start, int AdjFace, int Next) - { - StartVertex = Start; - AdjacentFace = AdjFace; - NextHalfedge = Next; - } - - /// - /// Gets an Unset PlanktonHalfedge. - /// - public static PlanktonHalfedge Unset - { - get - { - return new PlanktonHalfedge() - { - StartVertex = -1, - AdjacentFace = -1, - NextHalfedge = -1, - PrevHalfedge = -1 - }; - } - } - - /// - /// Whether or not the vertex is currently being referenced in the mesh. - /// Defined as a halfedge which has no starting vertex index. 
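// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): how the face-index bookkeeping
// added in this change-set might be used. AssignFaceIndex() simply stamps
// each PlanktonFace.Index with its current position in the face list, so the
// value is only valid until faces are added, removed or compacted and must be
// refreshed after such edits. This is a reading of the new code, not
// documented behaviour.
using System;
using Plankton;

static class FaceIndexSketch
{
    static void RefreshAndUse(PlanktonMesh mesh)
    {
        // Stamp every face with its list position.
        mesh.Faces.AssignFaceIndex();

        foreach (PlanktonFace face in mesh.Faces)
        {
            if (face.IsUnused) continue;   // skip faces marked for deletion
            Console.WriteLine("face {0} starts at halfedge {1}",
                              face.Index, face.FirstHalfedge);
        }
    }
}
// ---------------------------------------------------------------------------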
- /// - public bool IsUnused { get { return (this.StartVertex < 0); } } - - [Obsolete()] - public bool Dead { get { return this.IsUnused; } } - } -} +using System; + +namespace Plankton +{ + /// + /// Represents a halfedge in Plankton's halfedge mesh data structure. + /// + public class PlanktonHalfedge + { + public int StartVertex; + public int AdjacentFace; + public int NextHalfedge; + public int PrevHalfedge; + + // by dyliu, not used yet + //public int EndVertex; + //public int PairHalfEdge; // either +1 or -1 + public int Index; + + // for sorting(counter-clockwise) + public double angleToX; + public double angleToY; + + // for folding : + // -1, valey + // 0, cut line (naked edge) + // 1, mountain + public int MV; + + + + internal PlanktonHalfedge() + { + StartVertex = -1; + //EndVertex = -1; + AdjacentFace = -1; + NextHalfedge = -1; + PrevHalfedge = -1; + //PairHalfEdge = + } + + + + internal PlanktonHalfedge(int StartV, int AdjFace, int NextE) + { + StartVertex = StartV; + AdjacentFace = AdjFace; + NextHalfedge = NextE; + } + + + + /// + /// Gets an Unset PlanktonHalfedge. + /// + public static PlanktonHalfedge Unset + { + get + { + return new PlanktonHalfedge() + { + StartVertex = -1, + AdjacentFace = -1, // if true, this is a naked edge + NextHalfedge = -1, + PrevHalfedge = -1 + }; + } + } + + /// + /// Whether or not the vertex is currently being referenced in the mesh. + /// Defined as a halfedge which has no starting vertex index. + /// + public bool IsUnused { get { return (this.StartVertex < 0); } } + + #region by dyliu + //public Line ToLine() + //{ + + // return new Line(); + + //} + + #endregion + + } +} diff --git a/src/Plankton/PlanktonHalfedgeList.cs b/src/Plankton/PlanktonHalfedgeList.cs index f66ea6d..c94d8d5 100644 --- a/src/Plankton/PlanktonHalfedgeList.cs +++ b/src/Plankton/PlanktonHalfedgeList.cs @@ -1,587 +1,656 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - -namespace Plankton -{ - /// - /// Provides access to the halfedges and Halfedge related functionality of a Mesh. - /// - public class PlanktonHalfEdgeList : IEnumerable - { - private readonly PlanktonMesh _mesh; - private List _list; - - /// - /// Initializes a new instance of the class. - /// Should be called from the mesh constructor. - /// - /// The mesh to which this list of halfedges belongs. - internal PlanktonHalfEdgeList(PlanktonMesh owner) - { - this._list = new List(); - this._mesh = owner; - } - - /// - /// Gets the number of halfedges. - /// - public int Count - { - get - { - return this._list.Count; - } - } - - #region methods - #region halfedge access - /// - /// Adds a new halfedge to the end of the Halfedge list. - /// - /// Halfedge to add. - /// The index of the newly added halfedge. - public int Add(PlanktonHalfedge halfedge) - { - if (halfedge == null) return -1; - this._list.Add(halfedge); - return this.Count - 1; - } - - /// - /// Add a pair of halfedges to the mesh. - /// - /// A vertex index (from which the first halfedge originates). - /// A vertex index (from which the second halfedge originates). - /// A face index (adjacent to the first halfedge). - /// The index of the first halfedge in the pair. - internal int AddPair(int start, int end, int face) - { - // he->next = he->pair - int i = this.Count; - this.Add(new PlanktonHalfedge(start, face, i + 1)); - this.Add(new PlanktonHalfedge(end, -1, i)); - return i; - } - - /// - /// Removes a pair of halfedges from the mesh. 
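// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): halfedges are stored in pairs
// at consecutive indices (AddPair appends both together), so the twin of
// halfedge h is h+1 when h is even and h-1 when h is odd, which is exactly
// what GetPairHalfedge computes. The loop below also shows one hedged use of
// the new MV flag from this change-set: marking naked edges with 0
// ("cut line") and leaving interior edges for a later mountain/valley
// assignment.
using Plankton;

static class HalfedgePairSketch
{
    static void MarkNakedEdges(PlanktonMesh mesh)
    {
        var hs = mesh.Halfedges;

        // Step by 2: each even index is the first halfedge of an edge pair.
        for (int h = 0; h < hs.Count; h += 2)
        {
            if (hs[h].IsUnused) continue;          // skip edges marked for deletion

            int pair = hs.GetPairHalfedge(h);      // == h + 1 for even h

            if (hs.IsBoundary(h))
            {
                // Naked edge: no face on one side. 0 is the "cut line" value
                // in the MV convention introduced by this change-set.
                hs[h].MV = 0;
                hs[pair].MV = 0;
            }
        }
    }
}
// ---------------------------------------------------------------------------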
- /// - /// The index of a halfedge in the pair to remove. - /// The halfedges are topologically disconnected from the mesh and marked for deletion. - /// Note that this helper method doesn't update adjacent faces. - internal void RemovePairHelper(int index) - { - int pair = this.GetPairHalfedge(index); - - // Reconnect adjacent halfedges - this.MakeConsecutive(this[pair].PrevHalfedge, this[index].NextHalfedge); - this.MakeConsecutive(this[index].PrevHalfedge, this[pair].NextHalfedge); - - // Update vertices' outgoing halfedges, if necessary. If last halfedge then - // make vertex unused (outgoing == -1), otherwise set to next around vertex. - var v1 = _mesh.Vertices[this[index].StartVertex]; - var v2 = _mesh.Vertices[this[pair].StartVertex]; - if (v1.OutgoingHalfedge == index) - { - if (this[pair].NextHalfedge == index) { v1.OutgoingHalfedge = -1; } - else { v1.OutgoingHalfedge = this[pair].NextHalfedge; } - } - if (v2.OutgoingHalfedge == pair) - { - if (this[index].NextHalfedge == pair) { v2.OutgoingHalfedge = -1; } - else { v2.OutgoingHalfedge = this[index].NextHalfedge; } - } - - // Mark halfedges for deletion - this[index] = PlanktonHalfedge.Unset; - this[pair] = PlanktonHalfedge.Unset; - } - - /// - /// Returns the halfedge at the given index. - /// - /// - /// Index of halfedge to get. - /// Must be larger than or equal to zero and smaller than the Halfedge Count of the mesh. - /// - /// The halfedge at the given index. - public PlanktonHalfedge this[int index] - { - get - { - return this._list[index]; - } - internal set - { - this._list[index] = value; - } - } - #endregion - - /// - /// Helper method to remove dead halfedges from the list, re-index and compact. - /// - internal void CompactHelper() - { - int marker = 0; // Location where the current halfedge should be moved to - - // Run through all the vertices - for (int iter = 0; iter < _list.Count; iter++) - { - // If halfedge is alive, check if we need to shuffle it down the list - if (!_list[iter].IsUnused) - { - if (marker < iter) - { - // Room to shuffle. Copy current halfedge to marked slot. - _list[marker] = _list[iter]; - - // Update start vertex, if necessary - var vertex = _mesh.Vertices[_list[marker].StartVertex]; - if (vertex.OutgoingHalfedge == iter) { vertex.OutgoingHalfedge = marker; } - - // Update adjacent face, if necessary - if (_list[marker].AdjacentFace > -1) - { - var face = _mesh.Faces[_list[marker].AdjacentFace]; - if (face.FirstHalfedge == iter) { face.FirstHalfedge = marker; } - } - - // Update next/prev halfedges - _list[_list[marker].NextHalfedge].PrevHalfedge = marker; - _list[_list[marker].PrevHalfedge].NextHalfedge = marker; - } - marker++; // That spot's filled. Advance the marker. - } - } - - // Throw a fit if we've ended up with an odd number of halfedges - // This could happen if only one of the halfedges in a pair was marked for deletion - if (marker % 2 > 0) { throw new InvalidOperationException("Halfedge count was odd after compaction"); } - - // Trim list down to new size - if (marker < _list.Count) { _list.RemoveRange(marker, _list.Count - marker); } - } - - #region traversals - /// - /// Traverses clockwise around the starting vertex of a halfedge. - /// - /// The index of a halfedge. - /// - /// An enumerable of halfedge indices incident to the starting vertex of - /// . Ordered clockwise around the vertex. - /// The returned enumerable will start with the specified halfedge. 
- /// - /// Lazily evaluated so if you change the mesh topology whilst using - /// this circulator, you'll know about it! - public IEnumerable GetVertexCirculator(int halfedgeIndex) - { - if (halfedgeIndex < 0 || halfedgeIndex > this.Count) { yield break; } - int h = halfedgeIndex; - int count = 0; - do - { - yield return h; - h = this[this.GetPairHalfedge(h)].NextHalfedge; - if (h < 0) { throw new InvalidOperationException("Unset index, cannot continue."); } - if (count++ > 999) { throw new InvalidOperationException("Runaway vertex circulator"); } - } - while (h != halfedgeIndex); - } - - /// - /// Traverses anticlockwise around the adjacent face of a halfedge. - /// - /// The index of a halfedge. - /// - /// An enumerable of halfedge indices incident to the adjacent face of - /// . Ordered anticlockwise around the face. - /// - /// Lazily evaluated so if you change the mesh topology whilst using - /// this circulator, you'll know about it! - public IEnumerable GetFaceCirculator(int halfedgeIndex) - { - if (halfedgeIndex < 0 || halfedgeIndex > this.Count) { yield break; } - int h = halfedgeIndex; - int count = 0; - do - { - yield return h; - h = this[h].NextHalfedge; - if (h < 0) { throw new InvalidOperationException("Unset index, cannot continue."); } - if (count++ > 999) { throw new InvalidOperationException("Runaway face circulator."); } - } - while (h != halfedgeIndex); - } - #endregion - - #region public helpers - /// - /// Gets the halfedge index between two vertices. - /// - /// A vertex index. - /// A vertex index. - /// If it exists, the index of the halfedge which originates - /// from and terminates at . - /// Otherwise -1 is returned. - public int FindHalfedge(int start, int end) - { - int halfedgeIndex = _mesh.Vertices[start].OutgoingHalfedge; - foreach (int h in this.GetVertexCirculator(halfedgeIndex)) - { - if (end == this[this.GetPairHalfedge(h)].StartVertex) - return h; - } - return -1; - } - - /// - /// Gets the opposing halfedge in a pair. - /// - /// A halfedge index. - /// The halfedge index with which the specified halfedge is paired. - public int GetPairHalfedge(int halfedgeIndex) - { - if (halfedgeIndex < 0 || halfedgeIndex >= this.Count) - { - throw new ArgumentOutOfRangeException(); - } - - return halfedgeIndex % 2 == 0 ? halfedgeIndex + 1 : halfedgeIndex - 1; - } - - [Obsolete("PairHalfedge is deprecated, pease use GetPairHalfedge instead.")] - public int PairHalfedge(int halfedgeIndex) - { - return this.GetPairHalfedge(halfedgeIndex); - } - - /// - /// Gets the two vertices for a halfedge. - /// - /// A halfedge index. - /// The pair of vertex indices connected by the specified halfedge. - /// The order follows the direction of the halfedge. - public int[] GetVertices(int index) - { - int I, J; - I = this[index].StartVertex; - J = this[this.GetPairHalfedge(index)].StartVertex; - - return new int[]{ I, J }; - } - - /// - /// Gets the halfedge a given number of 'next's around a face from a starting halfedge - /// - /// The halfedge to start from - /// How many steps around the face. 0 returns the start_he - /// The resulting halfedge - [Obsolete("GetNextHalfedge(int,int) is deprecated, please use" + - "GetFaceCirculator(int).ElementAt(int) instead (LINQ).")] - public int GetNextHalfEdge(int startHalfEdge, int around) - { - int he_around = startHalfEdge; - for (int i = 0; i < around; i++) - { - he_around = this[he_around].NextHalfedge; - } - return he_around; - } - - /// - /// A halfedge is on a boundary if it only has a face on one side. 
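// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): circulating clockwise around a
// vertex with GetVertexCirculator. Starting from the vertex's outgoing
// halfedge, the circulator yields each outgoing halfedge once, so its count
// is the vertex valence, and any yielded halfedge with no adjacent face means
// the vertex sits on a boundary. FindHalfedge is the directed lookup used to
// test whether an edge v0 -> v1 already exists.
using System;
using System.Linq;
using Plankton;

static class VertexCirculatorSketch
{
    static void Describe(PlanktonMesh mesh, int v0, int v1)
    {
        var hs = mesh.Halfedges;
        int outgoing = mesh.Vertices[v0].OutgoingHalfedge;

        int valence = hs.GetVertexCirculator(outgoing).Count();
        bool onBoundary = hs.GetVertexCirculator(outgoing)
                            .Any(h => hs[h].AdjacentFace < 0);
        Console.WriteLine("vertex {0}: valence {1}, boundary: {2}",
                          v0, valence, onBoundary);

        // Directed edge lookup: -1 means no halfedge runs from v0 to v1.
        int h01 = hs.FindHalfedge(v0, v1);
        Console.WriteLine("halfedge {0} -> {1}: {2}", v0, v1, h01);
    }
}
// ---------------------------------------------------------------------------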
- /// - /// The index of a halfedge. - /// true if the specified halfedge is on a boundary; otherwise, false. - public bool IsBoundary(int index) - { - int pair = this.GetPairHalfedge(index); - - // Check for a face on both sides - return (this[index].AdjacentFace == -1 || this[pair].AdjacentFace == -1); - } - - /// - /// Gets the index of the vertex at the end of a halfedge. - /// - /// The index of a halfedge. - /// The index of vertex at the end of the specified halfedge. - /// This helper actually returns the start vertex of the other halfedge in the pair. - public int EndVertex(int halfedgeIndex) - { - return this[GetPairHalfedge(halfedgeIndex)].StartVertex; - } - #endregion - - #region internal helpers - internal void MakeConsecutive(int prev, int next) - { - this[prev].NextHalfedge = next; - this[next].PrevHalfedge = prev; - } - #endregion - - #region Geometry - public double[] GetLengths() - /// - /// Measure the lengths of all the halfedges - /// - /// An array of lengths for all halfedges, or -1 for dead ones - { - double[] Lengths = new double[this.Count]; - for (int i = 0; i < this.Count; i += 2) - { - double EdgeLength = GetLength(i); - Lengths[i] = EdgeLength; - Lengths[i + 1] = EdgeLength; - } - return Lengths; - } - - public double GetLength(int index) - /// - /// Measure the length of a single halfedge - /// - /// The length of the halfedge, or -1 if unused - { - double EdgeLength = -1; - if (this[index].IsUnused == false) - { - PlanktonXYZ Start = _mesh.Vertices[this[index].StartVertex].ToXYZ(); - PlanktonXYZ End = _mesh.Vertices[this.EndVertex(index)].ToXYZ(); - EdgeLength = (End - Start).Length; - } - return EdgeLength; - } - - #endregion - - #region Euler operators - /// - /// Performs an edge flip. This works by shifting the start/end vertices of the edge - /// anticlockwise around their faces (by one vertex) and as such can be applied to any - /// n-gon mesh, not just triangulations. - /// - /// The index of a halfedge in the edge to be flipped. - /// True on success, otherwise false. 
- public bool FlipEdge(int index) - { - // Don't allow if halfedge is on a boundary - if (this[index].AdjacentFace < 0 || this[GetPairHalfedge(index)].AdjacentFace < 0) - return false; - - // Make a note of some useful halfedges, along with 'index' itself - int pair = this.GetPairHalfedge(index); - int next = this[index].NextHalfedge; - int pair_next = this[pair].NextHalfedge; - - // Also don't allow if the edge that would be created by flipping already exists in the mesh - if (FindHalfedge(EndVertex(pair_next), EndVertex(next)) != -1) - return false; - - // to flip an edge - // 6 nexts - // 6 prevs - this.MakeConsecutive(this[pair].PrevHalfedge, next); - this.MakeConsecutive(index, this[next].NextHalfedge); - this.MakeConsecutive(next, pair); - this.MakeConsecutive(this[index].PrevHalfedge, pair_next); - this.MakeConsecutive(pair, this[pair_next].NextHalfedge); - this.MakeConsecutive(pair_next, index); - // for each vert, check if need to update outgoing - int v = this[index].StartVertex; - if (_mesh.Vertices[v].OutgoingHalfedge == index) - _mesh.Vertices[v].OutgoingHalfedge = pair_next; - v = this[pair].StartVertex; - if (_mesh.Vertices[v].OutgoingHalfedge == pair) - _mesh.Vertices[v].OutgoingHalfedge = next; - // for each face, check if need to update start he - int f = this[index].AdjacentFace; - if (_mesh.Faces[f].FirstHalfedge == next) - _mesh.Faces[f].FirstHalfedge = index; - f = this[pair].AdjacentFace; - if (_mesh.Faces[f].FirstHalfedge == pair_next) - _mesh.Faces[f].FirstHalfedge = pair; - // update 2 start verts - this[index].StartVertex = EndVertex(pair_next); - this[pair].StartVertex = EndVertex(next); - // 2 adjacentfaces - this[next].AdjacentFace = this[pair].AdjacentFace; - this[pair_next].AdjacentFace = this[index].AdjacentFace; - - return true; - } - - /// - /// Creates a new vertex, and inserts it along an existing edge, splitting it in 2. - /// - /// The index of a halfedge in the edge to be split. - /// The index of the newly created halfedge in the same direction as the input halfedge. - public int SplitEdge(int index) - { - int pair = this.GetPairHalfedge(index); - - // Create a copy of the existing vertex (user can move it afterwards if needs be) - int end_vertex = this[pair].StartVertex; - int new_vertex_index = _mesh.Vertices.Add(_mesh.Vertices[end_vertex].ToXYZ()); // use XYZ to copy - - // Add a new halfedge pair - int new_halfedge1 = this.AddPair(new_vertex_index, this.EndVertex(index), this[index].AdjacentFace); - int new_halfedge2 = this.GetPairHalfedge(new_halfedge1); - this[new_halfedge2].AdjacentFace = this[pair].AdjacentFace; - - // Link new pair into mesh - this.MakeConsecutive(new_halfedge1, this[index].NextHalfedge); - this.MakeConsecutive(index, new_halfedge1); - this.MakeConsecutive(this[pair].PrevHalfedge, new_halfedge2); - this.MakeConsecutive(new_halfedge2, pair); - - // Set new vertex's outgoing halfedge - _mesh.Vertices[new_vertex_index].OutgoingHalfedge = new_halfedge1; - - // Change the start of the pair of the input halfedge to the new vertex - this[pair].StartVertex = new_vertex_index; - - // Update end vertex's outgoing halfedge, if necessary - if (_mesh.Vertices[end_vertex].OutgoingHalfedge == pair) - { - _mesh.Vertices[end_vertex].OutgoingHalfedge = new_halfedge2; - } - - return new_halfedge1; - } - - /// - /// Split 2 adjacent triangles into 4 by inserting a new vertex along the edge - /// - /// The index of the halfedge to split. Must be between 2 triangles. 
- /// The index of the halfedge going from the new vertex to the end of the input halfedge, or -1 on failure - public int TriangleSplitEdge(int index) - { - //split the edge - // (I guess we could include a parameter for where along the edge to split) - int new_halfedge = this.SplitEdge(index); - int point_on_edge = this[new_halfedge].StartVertex; - - _mesh.Vertices[point_on_edge].X = 0.5F * (_mesh.Vertices[this[index].StartVertex].X + _mesh.Vertices[this.EndVertex(new_halfedge)].X); - _mesh.Vertices[point_on_edge].Y = 0.5F * (_mesh.Vertices[this[index].StartVertex].Y + _mesh.Vertices[this.EndVertex(new_halfedge)].Y); - _mesh.Vertices[point_on_edge].Z = 0.5F * (_mesh.Vertices[this[index].StartVertex].Z + _mesh.Vertices[this.EndVertex(new_halfedge)].Z); - - int new_face1 = _mesh.Faces.SplitFace(new_halfedge, this[this[new_halfedge].NextHalfedge].NextHalfedge); - int new_face2 = _mesh.Faces.SplitFace(this.GetPairHalfedge(index), this[this[this.GetPairHalfedge(index)].NextHalfedge].NextHalfedge); - - return new_halfedge; - } - - /// - /// Collapse an edge by combining 2 vertices - /// - /// The index of a halfedge in the edge to collapse. The end vertex will be removed - /// The successor to around its vertex, or -1 on failure. - public int CollapseEdge(int index) - { - var fs = _mesh.Faces; - int pair = this.GetPairHalfedge(index); - int v_keep = this[index].StartVertex; - int v_kill = this[pair].StartVertex; - int f = this[index].AdjacentFace; - int f_pair = this[pair].AdjacentFace; - - // Don't allow the creation of non-manifold vertices - // This would happen if the edge is internal (face on both sides) and - // both incident vertices lie on a boundary - if (f > -1 && f_pair > -1) - { - if (this[_mesh.Vertices[v_keep].OutgoingHalfedge].AdjacentFace < 0 && - this[_mesh.Vertices[v_kill].OutgoingHalfedge].AdjacentFace < 0) - { - return -1; - } - } - - // Avoid creating a non-manifold edge... - // If the edge is internal, then its ends must not have more than 2 neighbours in common. - // If the edge is a boundary edge (or has one 3+ sided face), then its ends must not - // have more than one neighbour in common. - //int allowed = (f > -1 && f_pair > -1) ? 
2 : 1; - int allowed = 0; - if (f >= 0 && fs.GetHalfedges(f).Length == 3) { allowed++; } - if (f_pair >= 0 && fs.GetHalfedges(f_pair).Length == 3) { allowed++; } - if (_mesh.Vertices.GetVertexNeighbours(v_keep) - .Intersect(_mesh.Vertices.GetVertexNeighbours(v_kill)).Count() > allowed) - { - return -1; - } - - // Save a couple of halfedges for later - int next = this[index].NextHalfedge; - int pair_prev = this[pair].PrevHalfedge; - - // Find the halfedges starting at the vertex we are about to remove - // and reconnect them to the one we are keeping - foreach (int h in this.GetVertexCirculator(next)) - { - this[h].StartVertex = v_keep; - } - - // Store return halfedge index (next around start vertex) - int h_rtn = this[pair].NextHalfedge; - - // Set outgoing halfedge - int v_kill_outgoing = _mesh.Vertices[v_kill].OutgoingHalfedge; - if (this[v_kill_outgoing].AdjacentFace < 0 && v_kill_outgoing != pair) - _mesh.Vertices[v_keep].OutgoingHalfedge = v_kill_outgoing; - else if (_mesh.Vertices[v_keep].OutgoingHalfedge == index) - _mesh.Vertices[v_keep].OutgoingHalfedge = h_rtn; // Next around vertex - - // Bypass both halfedges by linking prev directly to next for each - this.MakeConsecutive(this[index].PrevHalfedge, next); - this.MakeConsecutive(pair_prev, this[pair].NextHalfedge); - - // Kill the halfedge pair and its end vertex - this[index] = PlanktonHalfedge.Unset; - this[pair] = PlanktonHalfedge.Unset; - _mesh.Vertices[v_kill] = PlanktonVertex.Unset; - - // Update faces' first halfedges, if necessary - if (f != -1 && fs[f].FirstHalfedge == index) - fs[f].FirstHalfedge = next; - if (f_pair != -1 && fs[f_pair].FirstHalfedge == pair) - fs[f_pair].FirstHalfedge = this[pair].NextHalfedge; - - // If either adjacent face was triangular it will now only have two sides. If so, - // try to merge faces into whatever is on the RIGHT of the associated halfedge. - if (f > -1 && this.GetFaceCirculator(next).Count() < 3) - { - if (fs.MergeFaces(this.GetPairHalfedge(next)) < 0) { fs.RemoveFace(f); } - } - if (f_pair > -1 && !this[pair_prev].IsUnused && this.GetFaceCirculator(pair_prev).Count() < 3) - { - if (fs.MergeFaces(this.GetPairHalfedge(pair_prev)) < 0) { fs.RemoveFace(f_pair); } - } - - return h_rtn; - } - #endregion - #endregion - - #region IEnumerable implementation - /// - /// Gets an enumerator that yields all halfedges in this collection. - /// - /// An enumerator. - public IEnumerator GetEnumerator() - { - return this._list.GetEnumerator(); - } - IEnumerator IEnumerable.GetEnumerator() - { - return this.GetEnumerator(); - } - #endregion - } -} +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace Plankton +{ + /// + /// Provides access to the halfedges and Halfedge related functionality of a Mesh. + /// + public class PlanktonHalfEdgeList : IEnumerable + { + private readonly PlanktonMesh _mesh; + private List _list; + + /// + /// Initializes a new instance of the class. + /// Should be called from the mesh constructor. + /// + /// The mesh to which this list of halfedges belongs. + internal PlanktonHalfEdgeList(PlanktonMesh owner) + { + this._list = new List(); + this._mesh = owner; + AssignHalfEdgeIndex(); + } + + /// + /// Gets the number of halfedges. + /// + public int Count + { + get + { + return this._list.Count; + } + } + + #region methods + #region halfedge access + /// + /// Adds a new halfedge to the end of the Halfedge list. + /// + /// Halfedge to add. + /// The index of the newly added halfedge. 
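Worth noting: the constructor above calls AssignHalfEdgeIndex() while the halfedge list is still empty, so the new Index field only becomes meaningful if the method is called again once the mesh has actually been built. A rough sketch of that pattern, assuming PlanktonFaceList.AddFace accepts an array of vertex indices (as Dual() uses it); the class name and coordinates are illustrative only:

using Plankton;

static class HalfedgeIndexExample
{
    static void Main()
    {
        var mesh = new PlanktonMesh();
        mesh.Vertices.Add(0.0, 0.0, 0.0);
        mesh.Vertices.Add(1.0, 0.0, 0.0);
        mesh.Vertices.Add(0.0, 1.0, 0.0);
        mesh.Faces.AddFace(new int[] { 0, 1, 2 });

        // Renumber once connectivity exists, so each halfedge's Index matches its list position.
        mesh.Halfedges.AssignHalfEdgeIndex();
    }
}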
+ public int Add(PlanktonHalfedge halfedge) + { + if (halfedge == null) return -1; + this._list.Add(halfedge); + return this.Count - 1; + } + + /// + /// Add a pair of halfedges to the mesh. + /// + /// A vertex index (from which the first halfedge originates). + /// A vertex index (from which the second halfedge originates). + /// A face index (adjacent to the first halfedge). + /// The index of the first halfedge in the pair. + internal int AddPair(int start, int end, int face) + { + // he->next = he->pair + int i = this.Count; + this.Add(new PlanktonHalfedge(start, face, i + 1)); + this.Add(new PlanktonHalfedge(end, -1, i)); + return i; + } + + /// + /// Removes a pair of halfedges from the mesh. + /// + /// The index of a halfedge in the pair to remove. + /// The halfedges are topologically disconnected from the mesh and marked for deletion. + /// Note that this helper method doesn't update adjacent faces. + internal void RemovePairHelper(int index) + { + int pair = this.GetPairHalfedge(index); + + // Reconnect adjacent halfedges + this.MakeConsecutive(this[pair].PrevHalfedge, this[index].NextHalfedge); + this.MakeConsecutive(this[index].PrevHalfedge, this[pair].NextHalfedge); + + // Update vertices' outgoing halfedges, if necessary. If last halfedge then + // make vertex unused (outgoing == -1), otherwise set to next around vertex. + var v1 = _mesh.Vertices[this[index].StartVertex]; + var v2 = _mesh.Vertices[this[pair].StartVertex]; + if (v1.OutgoingHalfedge == index) + { + if (this[pair].NextHalfedge == index) { v1.OutgoingHalfedge = -1; } + else { v1.OutgoingHalfedge = this[pair].NextHalfedge; } + } + if (v2.OutgoingHalfedge == pair) + { + if (this[index].NextHalfedge == pair) { v2.OutgoingHalfedge = -1; } + else { v2.OutgoingHalfedge = this[index].NextHalfedge; } + } + + // Mark halfedges for deletion + this[index] = PlanktonHalfedge.Unset; + this[pair] = PlanktonHalfedge.Unset; + } + + /// + /// Returns the halfedge at the given index. + /// + /// + /// Index of halfedge to get. + /// Must be larger than or equal to zero and smaller than the Halfedge Count of the mesh. + /// + /// The halfedge at the given index. + public PlanktonHalfedge this[int index] + { + get + { + return this._list[index]; + } + internal set + { + this._list[index] = value; + } + } + #endregion + + /// + /// Helper method to remove dead halfedges from the list, re-index and compact. + /// + internal void CompactHelper() + { + int marker = 0; // Location where the current halfedge should be moved to + + // Run through all the vertices + for (int iter = 0; iter < _list.Count; iter++) + { + // If halfedge is alive, check if we need to shuffle it down the list + if (!_list[iter].IsUnused) + { + if (marker < iter) + { + // Room to shuffle. Copy current halfedge to marked slot. + _list[marker] = _list[iter]; + + // Update start vertex, if necessary + var vertex = _mesh.Vertices[_list[marker].StartVertex]; + if (vertex.OutgoingHalfedge == iter) { vertex.OutgoingHalfedge = marker; } + + // Update adjacent face, if necessary + if (_list[marker].AdjacentFace > -1) + { + var face = _mesh.Faces[_list[marker].AdjacentFace]; + if (face.FirstHalfedge == iter) { face.FirstHalfedge = marker; } + } + + // Update next/prev halfedges + _list[_list[marker].NextHalfedge].PrevHalfedge = marker; + _list[_list[marker].PrevHalfedge].NextHalfedge = marker; + } + marker++; // That spot's filled. Advance the marker. 
+ } + } + + // Throw a fit if we've ended up with an odd number of halfedges + // This could happen if only one of the halfedges in a pair was marked for deletion + if (marker % 2 > 0) { throw new InvalidOperationException("Halfedge count was odd after compaction"); } + + // Trim list down to new size + if (marker < _list.Count) { _list.RemoveRange(marker, _list.Count - marker); } + } + + #region traversals + // !!! + /// + /// Traverses clockwise around the starting vertex of a halfedge. + /// + /// The index of a halfedge. + /// + /// An enumerable of halfedge indices incident to the starting vertex of + /// . Ordered clockwise around the vertex. + /// The returned enumerable will start with the specified halfedge. + /// + /// Lazily evaluated so if you change the mesh topology whilst using + /// this circulator, you'll know about it! + public IEnumerable GetVertexCirculator(int halfedgeIndex) + { + if (halfedgeIndex < 0 || halfedgeIndex > this.Count) { yield break; } + int h = halfedgeIndex; + int count = 0; + do + { + yield return h; + h = this[this.GetPairHalfedge(h)].NextHalfedge; + if (h < 0) { throw new InvalidOperationException("Unset index, cannot continue."); } + if (count++ > 999) { throw new InvalidOperationException("Runaway vertex circulator"); } + } + while (h != halfedgeIndex); + } + //!!! + /// + /// Traverses anticlockwise around the adjacent face of a halfedge. + /// + /// The index of a halfedge. + /// + /// An enumerable of halfedge indices incident to the adjacent face of + /// . Ordered anticlockwise around the face. + /// + /// Lazily evaluated so if you change the mesh topology whilst using + /// this circulator, you'll know about it! + public IEnumerable GetFaceCirculator(int halfedgeIndex) + { + if (halfedgeIndex < 0 || halfedgeIndex > this.Count) { yield break; } + int h = halfedgeIndex; + int count = 0; + do + { + yield return h; + h = this[h].NextHalfedge; + if (h < 0) { throw new InvalidOperationException("Unset index, cannot continue."); } + if (count++ > 999) { throw new InvalidOperationException("Runaway face circulator."); } + } + while (h != halfedgeIndex); + } + #endregion + + #region public helpers + /// + /// Gets the halfedge index between two vertices. + /// + /// A vertex index. + /// A vertex index. + /// If it exists, the index of the halfedge which originates + /// from and terminates at . + /// Otherwise -1 is returned. + public int FindHalfedge(int start, int end) + { + int halfedgeIndex = _mesh.Vertices[start].OutgoingHalfedge; + foreach (int h in this.GetVertexCirculator(halfedgeIndex)) + { + if (end == this[this.GetPairHalfedge(h)].StartVertex) + return h; + } + return -1; + } + + // !!! + /// + /// Gets the opposing halfedge in a pair. + /// + /// A halfedge index. + /// The halfedge index with which the specified halfedge is paired. + public int GetPairHalfedge(int halfedgeIndex) + { + if (halfedgeIndex < 0 || halfedgeIndex >= this.Count) + { + throw new ArgumentOutOfRangeException(); + } + + return halfedgeIndex % 2 == 0 ? halfedgeIndex + 1 : halfedgeIndex - 1; + } + + // !!! + /// + /// Gets the two vertices for a halfedge. + /// + /// A halfedge index. + /// The pair of vertex indices connected by the specified halfedge. + /// The order follows the direction of the halfedge. 
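GetPairHalfedge depends on the invariant set up by AddPair: the two halves of an edge always occupy consecutive even/odd slots, so the pair of a halfedge is simply its neighbour in the list. A short usage sketch of the circulators and FindHalfedge; the wrapper method and the vertex indices 0 and 1 are placeholders:

using System;
using Plankton;

static class CirculatorExample
{
    // Prints the one-ring of the vertex that halfedge 'h' starts at,
    // then walks the edges of the face adjacent to 'h'.
    static void Walk(PlanktonMesh mesh, int h)
    {
        foreach (int he in mesh.Halfedges.GetVertexCirculator(h))
        {
            // The pair lives in the adjacent even/odd slot.
            int end = mesh.Halfedges[mesh.Halfedges.GetPairHalfedge(he)].StartVertex;
            Console.WriteLine("outgoing halfedge {0} ends at vertex {1}", he, end);
        }

        foreach (int he in mesh.Halfedges.GetFaceCirculator(h))
        {
            Console.WriteLine("face halfedge {0}", he);
        }

        // Directed lookup; returns -1 if no halfedge runs from vertex 0 to vertex 1.
        Console.WriteLine("halfedge 0 -> 1: {0}", mesh.Halfedges.FindHalfedge(0, 1));
    }
}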
+ public int[] GetVertices(int index) + { + int I, J; + I = this[index].StartVertex; + J = this[this.GetPairHalfedge(index)].StartVertex; + + return new int[]{ I, J }; + } + public int[] GetVertices(PlanktonHalfedge e) + { + int I, J; + I = e.StartVertex; + J = this[this.GetPairHalfedge(e.Index)].StartVertex; + return new int[] { I, J }; + } + + /// + /// Gets the halfedge a given number of 'next's around a face from a starting halfedge + /// + /// The halfedge to start from + /// How many steps around the face. 0 returns the start_he + /// The resulting halfedge + [Obsolete("GetNextHalfedge(int,int) is deprecated, please use" + + "GetFaceCirculator(int).ElementAt(int) instead (LINQ).")] + public int GetNextHalfEdge(int startHalfEdge, int around) + { + int he_around = startHalfEdge; + for (int i = 0; i < around; i++) + { + he_around = this[he_around].NextHalfedge; + } + return he_around; + } + + // !!! + /// + /// A halfedge is on a boundary if it only has a face on one side. + /// + /// The index of a halfedge. + /// true if the specified halfedge is on a boundary; otherwise, false. + public bool IsBoundary(int index) + { + int pair = this.GetPairHalfedge(index); + + // Check for a face on both sides + return (this[index].AdjacentFace == -1 || this[pair].AdjacentFace == -1); + } + + // !!! + /// + /// Gets the index of the vertex at the end of a halfedge. + /// + /// The index of a halfedge. + /// The index of vertex at the end of the specified halfedge. + /// This helper actually returns the start vertex of the other halfedge in the pair. + public int EndVertex(int halfedgeIndex) + { + return this[GetPairHalfedge(halfedgeIndex)].StartVertex; + } + #endregion + + #region internal helpers + internal void MakeConsecutive(int prev, int next) + { + this[prev].NextHalfedge = next; + this[next].PrevHalfedge = prev; + } + #endregion + + #region Geometry + public double[] GetLengths() + /// + /// Measure the lengths of all the halfedges + /// + /// An array of lengths for all halfedges, or -1 for dead ones + { + double[] Lengths = new double[this.Count]; + for (int i = 0; i < this.Count; i += 2) + { + double EdgeLength = GetLength(i); + Lengths[i] = EdgeLength; + Lengths[i + 1] = EdgeLength; + } + return Lengths; + } + + public double GetLength(int index) + /// + /// Measure the length of a single halfedge + /// + /// The length of the halfedge, or -1 if unused + { + double EdgeLength = -1; + if (this[index].IsUnused == false) + { + PlanktonXYZ Start = _mesh.Vertices[this[index].StartVertex].ToXYZ(); + PlanktonXYZ End = _mesh.Vertices[this.EndVertex(index)].ToXYZ(); + EdgeLength = (End - Start).Length; + } + return EdgeLength; + } + + #endregion + + #region Euler operators + /// + /// Performs an edge flip. This works by shifting the start/end vertices of the edge + /// anticlockwise around their faces (by one vertex) and as such can be applied to any + /// n-gon mesh, not just triangulations. + /// + /// The index of a halfedge in the edge to be flipped. + /// True on success, otherwise false. 
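Because GetLengths writes the same value into both halves of each pair (and -1 for unused pairs), iterating every second entry visits each edge exactly once. A minimal sketch; the helper name is illustrative:

using Plankton;

static class EdgeLengthExample
{
    static double TotalEdgeLength(PlanktonMesh mesh)
    {
        double[] lengths = mesh.Halfedges.GetLengths();

        double total = 0.0;
        for (int h = 0; h < lengths.Length; h += 2)           // one halfedge per edge
        {
            if (lengths[h] >= 0.0) { total += lengths[h]; }   // skip unused pairs (-1)
        }
        return total;
    }
}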
+ public bool FlipEdge(int index) + { + // Don't allow if halfedge is on a boundary + if (this[index].AdjacentFace < 0 || this[GetPairHalfedge(index)].AdjacentFace < 0) + return false; + + // Make a note of some useful halfedges, along with 'index' itself + int pair = this.GetPairHalfedge(index); + int next = this[index].NextHalfedge; + int pair_next = this[pair].NextHalfedge; + + // Also don't allow if the edge that would be created by flipping already exists in the mesh + if (FindHalfedge(EndVertex(pair_next), EndVertex(next)) != -1) + return false; + + // to flip an edge + // 6 nexts + // 6 prevs + this.MakeConsecutive(this[pair].PrevHalfedge, next); + this.MakeConsecutive(index, this[next].NextHalfedge); + this.MakeConsecutive(next, pair); + this.MakeConsecutive(this[index].PrevHalfedge, pair_next); + this.MakeConsecutive(pair, this[pair_next].NextHalfedge); + this.MakeConsecutive(pair_next, index); + // for each vert, check if need to update outgoing + int v = this[index].StartVertex; + if (_mesh.Vertices[v].OutgoingHalfedge == index) + _mesh.Vertices[v].OutgoingHalfedge = pair_next; + v = this[pair].StartVertex; + if (_mesh.Vertices[v].OutgoingHalfedge == pair) + _mesh.Vertices[v].OutgoingHalfedge = next; + // for each face, check if need to update start he + int f = this[index].AdjacentFace; + if (_mesh.Faces[f].FirstHalfedge == next) + _mesh.Faces[f].FirstHalfedge = index; + f = this[pair].AdjacentFace; + if (_mesh.Faces[f].FirstHalfedge == pair_next) + _mesh.Faces[f].FirstHalfedge = pair; + // update 2 start verts + this[index].StartVertex = EndVertex(pair_next); + this[pair].StartVertex = EndVertex(next); + // 2 adjacentfaces + this[next].AdjacentFace = this[pair].AdjacentFace; + this[pair_next].AdjacentFace = this[index].AdjacentFace; + + return true; + } + + /// + /// Creates a new vertex, and inserts it along an existing edge, splitting it in 2. + /// + /// The index of a halfedge in the edge to be split. + /// The index of the newly created halfedge in the same direction as the input halfedge. + public int SplitEdge(int index) + { + int pair = this.GetPairHalfedge(index); + + // Create a copy of the existing vertex (user can move it afterwards if needs be) + int end_vertex = this[pair].StartVertex; + int new_vertex_index = _mesh.Vertices.Add(_mesh.Vertices[end_vertex].ToXYZ()); // use XYZ to copy + + // Add a new halfedge pair + int new_halfedge1 = this.AddPair(new_vertex_index, this.EndVertex(index), this[index].AdjacentFace); + int new_halfedge2 = this.GetPairHalfedge(new_halfedge1); + this[new_halfedge2].AdjacentFace = this[pair].AdjacentFace; + + // Link new pair into mesh + this.MakeConsecutive(new_halfedge1, this[index].NextHalfedge); + this.MakeConsecutive(index, new_halfedge1); + this.MakeConsecutive(this[pair].PrevHalfedge, new_halfedge2); + this.MakeConsecutive(new_halfedge2, pair); + + // Set new vertex's outgoing halfedge + _mesh.Vertices[new_vertex_index].OutgoingHalfedge = new_halfedge1; + + // Change the start of the pair of the input halfedge to the new vertex + this[pair].StartVertex = new_vertex_index; + + // Update end vertex's outgoing halfedge, if necessary + if (_mesh.Vertices[end_vertex].OutgoingHalfedge == pair) + { + _mesh.Vertices[end_vertex].OutgoingHalfedge = new_halfedge2; + } + + return new_halfedge1; + } + + /// + /// Split 2 adjacent triangles into 4 by inserting a new vertex along the edge + /// + /// The index of the halfedge to split. Must be between 2 triangles. 
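SplitEdge deliberately leaves the new vertex coincident with the old end vertex, so callers normally reposition it afterwards, exactly as TriangleSplitEdge does below with the midpoint. A sketch of that idiom for a single edge; the method name is illustrative:

using Plankton;

static class SplitExample
{
    // Splits halfedge 'h' and moves the new vertex to the midpoint of the original edge.
    static int SplitAtMidpoint(PlanktonMesh mesh, int h)
    {
        // Record the endpoints before splitting, since 'h' will end at the new vertex afterwards.
        PlanktonXYZ a = mesh.Vertices[mesh.Halfedges[h].StartVertex].ToXYZ();
        PlanktonXYZ b = mesh.Vertices[mesh.Halfedges.EndVertex(h)].ToXYZ();

        int hNew = mesh.Halfedges.SplitEdge(h);          // new halfedge, same direction as h
        int vNew = mesh.Halfedges[hNew].StartVertex;     // the vertex that was just inserted

        PlanktonXYZ mid = a + (b - a) * 0.5f;
        mesh.Vertices.SetVertex(vNew, mid.X, mid.Y, mid.Z);
        return hNew;
    }
}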
+ /// The index of the halfedge going from the new vertex to the end of the input halfedge, or -1 on failure + public int TriangleSplitEdge(int index) + { + //split the edge + // (I guess we could include a parameter for where along the edge to split) + int new_halfedge = this.SplitEdge(index); + int point_on_edge = this[new_halfedge].StartVertex; + + _mesh.Vertices[point_on_edge].X = 0.5F * (_mesh.Vertices[this[index].StartVertex].X + _mesh.Vertices[this.EndVertex(new_halfedge)].X); + _mesh.Vertices[point_on_edge].Y = 0.5F * (_mesh.Vertices[this[index].StartVertex].Y + _mesh.Vertices[this.EndVertex(new_halfedge)].Y); + _mesh.Vertices[point_on_edge].Z = 0.5F * (_mesh.Vertices[this[index].StartVertex].Z + _mesh.Vertices[this.EndVertex(new_halfedge)].Z); + + int new_face1 = _mesh.Faces.SplitFace(new_halfedge, this[this[new_halfedge].NextHalfedge].NextHalfedge); + int new_face2 = _mesh.Faces.SplitFace(this.GetPairHalfedge(index), this[this[this.GetPairHalfedge(index)].NextHalfedge].NextHalfedge); + + return new_halfedge; + } + + /// + /// Collapse an edge by combining 2 vertices + /// + /// The index of a halfedge in the edge to collapse. The end vertex will be removed + /// The successor to around its vertex, or -1 on failure. + public int CollapseEdge(int index) + { + var fs = _mesh.Faces; + int pair = this.GetPairHalfedge(index); + int v_keep = this[index].StartVertex; + int v_kill = this[pair].StartVertex; + int f = this[index].AdjacentFace; + int f_pair = this[pair].AdjacentFace; + + // Don't allow the creation of non-manifold vertices + // This would happen if the edge is internal (face on both sides) and + // both incident vertices lie on a boundary + if (f > -1 && f_pair > -1) + { + if (this[_mesh.Vertices[v_keep].OutgoingHalfedge].AdjacentFace < 0 && + this[_mesh.Vertices[v_kill].OutgoingHalfedge].AdjacentFace < 0) + { + return -1; + } + } + + // Avoid creating a non-manifold edge... + // If the edge is internal, then its ends must not have more than 2 neighbours in common. + // If the edge is a boundary edge (or has one 3+ sided face), then its ends must not + // have more than one neighbour in common. + //int allowed = (f > -1 && f_pair > -1) ? 
2 : 1; + int allowed = 0; + if (f >= 0 && fs.GetHalfedges(f).Length == 3) { allowed++; } + if (f_pair >= 0 && fs.GetHalfedges(f_pair).Length == 3) { allowed++; } + if (_mesh.Vertices.GetVertexNeighbours(v_keep) + .Intersect(_mesh.Vertices.GetVertexNeighbours(v_kill)).Count() > allowed) + { + return -1; + } + + // Save a couple of halfedges for later + int next = this[index].NextHalfedge; + int pair_prev = this[pair].PrevHalfedge; + + // Find the halfedges starting at the vertex we are about to remove + // and reconnect them to the one we are keeping + foreach (int h in this.GetVertexCirculator(next)) + { + this[h].StartVertex = v_keep; + } + + // Store return halfedge index (next around start vertex) + int h_rtn = this[pair].NextHalfedge; + + // Set outgoing halfedge + int v_kill_outgoing = _mesh.Vertices[v_kill].OutgoingHalfedge; + if (this[v_kill_outgoing].AdjacentFace < 0 && v_kill_outgoing != pair) + _mesh.Vertices[v_keep].OutgoingHalfedge = v_kill_outgoing; + else if (_mesh.Vertices[v_keep].OutgoingHalfedge == index) + _mesh.Vertices[v_keep].OutgoingHalfedge = h_rtn; // Next around vertex + + // Bypass both halfedges by linking prev directly to next for each + this.MakeConsecutive(this[index].PrevHalfedge, next); + this.MakeConsecutive(pair_prev, this[pair].NextHalfedge); + + // Kill the halfedge pair and its end vertex + this[index] = PlanktonHalfedge.Unset; + this[pair] = PlanktonHalfedge.Unset; + _mesh.Vertices[v_kill] = PlanktonVertex.Unset; + + // Update faces' first halfedges, if necessary + if (f != -1 && fs[f].FirstHalfedge == index) + fs[f].FirstHalfedge = next; + if (f_pair != -1 && fs[f_pair].FirstHalfedge == pair) + fs[f_pair].FirstHalfedge = this[pair].NextHalfedge; + + // If either adjacent face was triangular it will now only have two sides. If so, + // try to merge faces into whatever is on the RIGHT of the associated halfedge. + if (f > -1 && this.GetFaceCirculator(next).Count() < 3) + { + if (fs.MergeFaces(this.GetPairHalfedge(next)) < 0) { fs.RemoveFace(f); } + } + if (f_pair > -1 && !this[pair_prev].IsUnused && this.GetFaceCirculator(pair_prev).Count() < 3) + { + if (fs.MergeFaces(this.GetPairHalfedge(pair_prev)) < 0) { fs.RemoveFace(f_pair); } + } + + return h_rtn; + } + #endregion + #endregion + + #region IEnumerable implementation + /// + /// Gets an enumerator that yields all halfedges in this collection. + /// + /// An enumerator. + public IEnumerator GetEnumerator() + { + return this._list.GetEnumerator(); + } + IEnumerator IEnumerable.GetEnumerator() + { + return this.GetEnumerator(); + } + #endregion + + #region by dyliu + + public void AssignHalfEdgeIndex() + { + for (int i = 0; i < this.Count(); i++) + { + this[i].Index = i; + } + } + + public List GetNakedEdges() + { + List nakedEdges = new List(); + + foreach (PlanktonHalfedge i in this) + if (i.AdjacentFace == -1) nakedEdges.Add(i); + + return nakedEdges; + } + + //public List DetermineMVs() + //{ + // List MVs = new List(); + // foreach ( PlanktonHalfedge i in this) + // { + + // } + + //} + + /// + /// dyliu: this works for quad, the inupt halfedge should not be a naked one + /// + /// + /// if equals one, return 2 face ids; if 2, return 4... 
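CollapseEdge returns -1 whenever the collapse would create a non-manifold vertex or edge, and the dead halfedges and vertices linger until Compact() is called. A cautious sketch of collapsing short edges; the tolerance, loop strategy and method name are illustrative, and a production version would probably re-examine neighbouring edges after each collapse:

using Plankton;

static class CollapseExample
{
    // Collapses every edge shorter than 'tol', then purges the entries marked as unused.
    static void CollapseShortEdges(PlanktonMesh mesh, double tol)
    {
        for (int h = 0; h < mesh.Halfedges.Count; h += 2)      // one halfedge per edge
        {
            if (mesh.Halfedges[h].IsUnused) { continue; }
            if (mesh.Halfedges.GetLength(h) < tol)
            {
                mesh.Halfedges.CollapseEdge(h);                // returns -1 if the collapse was refused
            }
        }
        mesh.Compact();                                        // remove dead vertices/halfedges/faces
    }
}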
+ /// + public List GetAdjacentFaces(int halfedgeId, int count) + { + if (this.IsBoundary(halfedgeId) == true || count <= 0) { return null; } + + List targetFaceIds = new List(); + + // initialization + int edge1 = halfedgeId; + int edge2 = this.GetPairHalfedge(edge1); + + for (int i = 0; i < count; i++) // each loop add 2 face ids + { + targetFaceIds.Add(this[edge1].AdjacentFace); + targetFaceIds.Add(this[edge2].AdjacentFace); + + edge1 = this.GetPairHalfedge(this.GetFaceCirculator(edge1).ElementAt(2)); + edge2 = this.GetPairHalfedge(this.GetFaceCirculator(edge2).ElementAt(2)); + + } + + return targetFaceIds; + } + + #endregion + + } +} diff --git a/src/Plankton/PlanktonMesh.cs b/src/Plankton/PlanktonMesh.cs index 76416a9..ef88f08 100644 --- a/src/Plankton/PlanktonMesh.cs +++ b/src/Plankton/PlanktonMesh.cs @@ -1,300 +1,308 @@ -//using Rhino.Geometry; -using System; -using System.Collections.Generic; -using System.Linq; - -namespace Plankton -{ - /// - /// Description of PlanktonMesh. - /// - public class PlanktonMesh - { - private PlanktonVertexList _vertices; - private PlanktonHalfEdgeList _halfedges; - private PlanktonFaceList _faces; - - #region "constructors" - public PlanktonMesh() //blank constructor - { - } - - public PlanktonMesh(PlanktonMesh source) - { - foreach (var v in source.Vertices) - { - this.Vertices.Add(new PlanktonVertex() { - OutgoingHalfedge = v.OutgoingHalfedge, - X = v.X, - Y = v.Y, - Z = v.Z - }); - } - foreach (var f in source.Faces) - { - this.Faces.Add(new PlanktonFace() { FirstHalfedge = f.FirstHalfedge }); - } - foreach (var h in source.Halfedges) - { - this.Halfedges.Add(new PlanktonHalfedge() { - StartVertex = h.StartVertex, - AdjacentFace = h.AdjacentFace, - NextHalfedge = h.NextHalfedge, - PrevHalfedge = h.PrevHalfedge, - }); - } - } - #endregion - - #region "properties" - /// - /// Gets access to the vertices collection in this mesh. - /// - public PlanktonVertexList Vertices - { - get { return _vertices ?? (_vertices = new PlanktonVertexList(this)); } - } - - /// - /// Gets access to the halfedges collection in this mesh. - /// - public PlanktonHalfEdgeList Halfedges - { - get { return _halfedges ?? (_halfedges = new PlanktonHalfEdgeList(this)); } - } - - /// - /// Gets access to the faces collection in this mesh. - /// - public PlanktonFaceList Faces - { - get { return _faces ?? 
(_faces = new PlanktonFaceList(this)); } - } - #endregion - - #region "general methods" - - /// - /// Calculate the volume of the mesh - /// - public double Volume() - { - double VolumeSum = 0; - for (int i = 0; i < this.Faces.Count; i++) - { - int[] FaceVerts = this.Faces.GetFaceVertices(i); - int EdgeCount = FaceVerts.Length; - if (EdgeCount == 3) - { - PlanktonXYZ P = this.Vertices[FaceVerts[0]].ToXYZ(); - PlanktonXYZ Q = this.Vertices[FaceVerts[1]].ToXYZ(); - PlanktonXYZ R = this.Vertices[FaceVerts[2]].ToXYZ(); - //get the signed volume of the tetrahedron formed by the triangle and the origin - VolumeSum += (1 / 6d) * ( - P.X * Q.Y * R.Z + - P.Y * Q.Z * R.X + - P.Z * Q.X * R.Y - - P.X * Q.Z * R.Y - - P.Y * Q.X * R.Z - - P.Z * Q.Y * R.X); - } - else - { - PlanktonXYZ P = this._faces.GetFaceCenter(i); - for (int j = 0; j < EdgeCount; j++) - { - PlanktonXYZ Q = this.Vertices[FaceVerts[j]].ToXYZ(); - PlanktonXYZ R = this.Vertices[FaceVerts[(j + 1) % EdgeCount]].ToXYZ(); - VolumeSum += (1 / 6d) * ( - P.X * Q.Y * R.Z + - P.Y * Q.Z * R.X + - P.Z * Q.X * R.Y - - P.X * Q.Z * R.Y - - P.Y * Q.X * R.Z - - P.Z * Q.Y * R.X); - } - } - } - return VolumeSum; - } - - public PlanktonMesh Dual() - { - // hack for open meshes - // TODO: improve this ugly method - if (this.IsClosed() == false) - { - var dual = new PlanktonMesh(); - - // create vertices from face centers - for (int i = 0; i < this.Faces.Count; i++) - { - dual.Vertices.Add(this.Faces.GetFaceCenter(i)); - } - - // create faces from the adjacent face indices of non-boundary vertices - for (int i = 0; i < this.Vertices.Count; i++) - { - if (this.Vertices.IsBoundary(i)) - { - continue; - } - dual.Faces.AddFace(this.Vertices.GetVertexFaces(i)); - } - - return dual; - } - - // can later add options for other ways of defining face centres (barycenter/circumcenter etc) - // won't work yet with naked boundaries - - PlanktonMesh P = this; - PlanktonMesh D = new PlanktonMesh(); - - //for every primal face, add the barycenter to the dual's vertex list - //dual vertex outgoing HE is primal face's start HE - //for every vertex of the primal, add a face to the dual - //dual face's startHE is primal vertex's outgoing's pair - - for (int i = 0; i < P.Faces.Count; i++) - { - var fc = P.Faces.GetFaceCenter(i); - D.Vertices.Add(new PlanktonVertex(fc.X, fc.Y, fc.Z)); - int[] FaceHalfedges = P.Faces.GetHalfedges(i); - for (int j = 0; j < FaceHalfedges.Length; j++) - { - if (P.Halfedges[P.Halfedges.GetPairHalfedge(FaceHalfedges[j])].AdjacentFace != -1) - { - // D.Vertices[i].OutgoingHalfedge = FaceHalfedges[j]; - D.Vertices[D.Vertices.Count-1].OutgoingHalfedge = P.Halfedges.GetPairHalfedge(FaceHalfedges[j]); - break; - } - } - } - - for (int i = 0; i < P.Vertices.Count; i++) - { - if (P.Vertices.NakedEdgeCount(i) == 0) - { - int df = D.Faces.Add(PlanktonFace.Unset); - // D.Faces[i].FirstHalfedge = P.PairHalfedge(P.Vertices[i].OutgoingHalfedge); - D.Faces[df].FirstHalfedge = P.Vertices[i].OutgoingHalfedge; - } - } - - // dual halfedge start V is primal AdjacentFace - // dual halfedge AdjacentFace is primal end V - // dual nextHE is primal's pair's prev - // dual prevHE is primal's next's pair - - // halfedge pairs stay the same - - for (int i = 0; i < P.Halfedges.Count; i++) - { - if ((P.Halfedges[i].AdjacentFace != -1) & (P.Halfedges[P.Halfedges.GetPairHalfedge(i)].AdjacentFace != -1)) - { - PlanktonHalfedge DualHE = PlanktonHalfedge.Unset; - PlanktonHalfedge PrimalHE = P.Halfedges[i]; - //DualHE.StartVertex = PrimalHE.AdjacentFace; - DualHE.StartVertex = 
P.Halfedges[P.Halfedges.GetPairHalfedge(i)].AdjacentFace; - - if (P.Vertices.NakedEdgeCount(PrimalHE.StartVertex) == 0) - { - //DualHE.AdjacentFace = P.Halfedges[P.PairHalfedge(i)].StartVertex; - DualHE.AdjacentFace = PrimalHE.StartVertex; - } - else { DualHE.AdjacentFace = -1; } - - //This will currently fail with open meshes... - //one option could be to build the dual with all halfedges, but mark some as dead - //if they connect to vertex -1 - //mark the 'external' faces all as -1 (the ones that are dual to boundary verts) - //then go through and if any next or prevs are dead hes then replace them with the next one around - //this needs to be done repeatedly until no further change - - //DualHE.NextHalfedge = P.Halfedges[P.PairHalfedge(i)].PrevHalfedge; - DualHE.NextHalfedge = P.Halfedges.GetPairHalfedge(PrimalHE.PrevHalfedge); - - //DualHE.PrevHalfedge = P.PairHalfedge(PrimalHE.NextHalfedge); - DualHE.PrevHalfedge = P.Halfedges[P.Halfedges.GetPairHalfedge(i)].NextHalfedge; - - D.Halfedges.Add(DualHE); - } - } - return D; - } - - public bool IsClosed() - { - for (int i = 0; i < this.Halfedges.Count; i++) - { - if (this.Halfedges[i].AdjacentFace < 0) - { - return false; - } - } - return true; - } - - /// - /// Truncates the vertices of a mesh. - /// - /// Optional parameter for the normalised distance along each edge to control the amount of truncation. - /// A new mesh, the result of the truncation. - public PlanktonMesh TruncateVertices(float t = 1f/3) - { - // TODO: handle special cases (t = 0.0, t = 0.5, t > 0.5) - var tMesh = new PlanktonMesh(this); - - var vxyz = tMesh.Vertices.Select(v => v.ToXYZ()).ToArray(); - PlanktonXYZ v0, v1, v2; - int[] oh; - for (int i = 0; i < this.Vertices.Count; i++) - { - oh = this.Vertices.GetHalfedges(i); - tMesh.Vertices.TruncateVertex(i); - foreach (var h in oh) - { - v0 = vxyz[this.Halfedges[h].StartVertex]; - v1 = vxyz[this.Halfedges.EndVertex(h)]; - v2 = v0 + (v1 - v0) * t; - tMesh.Vertices.SetVertex(tMesh.Halfedges[h].StartVertex, v2.X, v2.Y, v2.Z); - } - } - - return tMesh; - } - - /* Hide for the time being to avoid confusion... - public void RefreshVertexNormals() - { - } - public void RefreshFaceNormals() - { - } - public void RefreshEdgeNormals() - { - } - */ - - /// - /// Removes any unreferenced objects from arrays, reindexes as needed and shrinks arrays to minimum required size. - /// - /// Thrown if halfedge count is odd after compaction. - /// Most likely caused by only marking one of the halfedges in a pair for deletion. - public void Compact() - { - // Compact vertices, faces and halfedges - this.Vertices.CompactHelper(); - this.Faces.CompactHelper(); - this.Halfedges.CompactHelper(); - } - - //dihedral angle for an edge - // - - //skeletonize - build a new mesh with 4 faces for each original edge - - #endregion - } -} +//using Rhino.Geometry; +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Plankton +{ + /// + /// Description of PlanktonMesh. 
+ /// + public class PlanktonMesh + { + private PlanktonVertexList _vertices; + private PlanktonHalfEdgeList _halfedges; + private PlanktonFaceList _faces; + + #region "constructors" + public PlanktonMesh() //blank constructor + { + } + + public PlanktonMesh(PlanktonMesh source) + { + foreach (var v in source.Vertices) + { + this.Vertices.Add(new PlanktonVertex() { + OutgoingHalfedge = v.OutgoingHalfedge, + X = v.X, + Y = v.Y, + Z = v.Z + }); + } + this.Vertices.AssignVertexIndex(); + + foreach (var f in source.Faces) + { + this.Faces.Add(new PlanktonFace() { FirstHalfedge = f.FirstHalfedge }); + } + this.Faces.AssignFaceIndex(); + + + foreach (var h in source.Halfedges) + { + this.Halfedges.Add(new PlanktonHalfedge() { + StartVertex = h.StartVertex, + AdjacentFace = h.AdjacentFace, + NextHalfedge = h.NextHalfedge, + PrevHalfedge = h.PrevHalfedge, + }); + } + this.Vertices.AssignVertexIndex(); + + + } + #endregion + + #region "properties" + /// + /// Gets access to the vertices collection in this mesh. + /// + public PlanktonVertexList Vertices + { + get { return _vertices ?? (_vertices = new PlanktonVertexList(this)); } + } + + /// + /// Gets access to the halfedges collection in this mesh. + /// + public PlanktonHalfEdgeList Halfedges + { + get { return _halfedges ?? (_halfedges = new PlanktonHalfEdgeList(this)); } + } + + /// + /// Gets access to the faces collection in this mesh. + /// + public PlanktonFaceList Faces + { + get { return _faces ?? (_faces = new PlanktonFaceList(this)); } + } + #endregion + + #region "general methods" + + /// + /// Calculate the volume of the mesh + /// + public double Volume() + { + double VolumeSum = 0; + for (int i = 0; i < this.Faces.Count; i++) + { + int[] FaceVerts = this.Faces.GetFaceVertices(i); + int EdgeCount = FaceVerts.Length; + if (EdgeCount == 3) + { + PlanktonXYZ P = this.Vertices[FaceVerts[0]].ToXYZ(); + PlanktonXYZ Q = this.Vertices[FaceVerts[1]].ToXYZ(); + PlanktonXYZ R = this.Vertices[FaceVerts[2]].ToXYZ(); + //get the signed volume of the tetrahedron formed by the triangle and the origin + VolumeSum += (1 / 6d) * ( + P.X * Q.Y * R.Z + + P.Y * Q.Z * R.X + + P.Z * Q.X * R.Y - + P.X * Q.Z * R.Y - + P.Y * Q.X * R.Z - + P.Z * Q.Y * R.X); + } + else + { + PlanktonXYZ P = this._faces.GetFaceCenter(i); + for (int j = 0; j < EdgeCount; j++) + { + PlanktonXYZ Q = this.Vertices[FaceVerts[j]].ToXYZ(); + PlanktonXYZ R = this.Vertices[FaceVerts[(j + 1) % EdgeCount]].ToXYZ(); + VolumeSum += (1 / 6d) * ( + P.X * Q.Y * R.Z + + P.Y * Q.Z * R.X + + P.Z * Q.X * R.Y - + P.X * Q.Z * R.Y - + P.Y * Q.X * R.Z - + P.Z * Q.Y * R.X); + } + } + } + return VolumeSum; + } + + public PlanktonMesh Dual() + { + // hack for open meshes + // TODO: improve this ugly method + if (this.IsClosed() == false) + { + var dual = new PlanktonMesh(); + + // create vertices from face centers + for (int i = 0; i < this.Faces.Count; i++) + { + dual.Vertices.Add(this.Faces.GetFaceCenter(i)); + } + + // create faces from the adjacent face indices of non-boundary vertices + for (int i = 0; i < this.Vertices.Count; i++) + { + if (this.Vertices.IsBoundary(i)) + { + continue; + } + dual.Faces.AddFace(this.Vertices.GetVertexFaces(i)); + } + + return dual; + } + + // can later add options for other ways of defining face centres (barycenter/circumcenter etc) + // won't work yet with naked boundaries + + PlanktonMesh P = this; + PlanktonMesh D = new PlanktonMesh(); + + //for every primal face, add the barycenter to the dual's vertex list + //dual vertex outgoing HE is primal face's start HE + 
//for every vertex of the primal, add a face to the dual + //dual face's startHE is primal vertex's outgoing's pair + + for (int i = 0; i < P.Faces.Count; i++) + { + var fc = P.Faces.GetFaceCenter(i); + D.Vertices.Add(new PlanktonVertex(fc.X, fc.Y, fc.Z)); + int[] FaceHalfedges = P.Faces.GetHalfedges(i); + for (int j = 0; j < FaceHalfedges.Length; j++) + { + if (P.Halfedges[P.Halfedges.GetPairHalfedge(FaceHalfedges[j])].AdjacentFace != -1) + { + // D.Vertices[i].OutgoingHalfedge = FaceHalfedges[j]; + D.Vertices[D.Vertices.Count-1].OutgoingHalfedge = P.Halfedges.GetPairHalfedge(FaceHalfedges[j]); + break; + } + } + } + + for (int i = 0; i < P.Vertices.Count; i++) + { + if (P.Vertices.NakedEdgeCount(i) == 0) + { + int df = D.Faces.Add(PlanktonFace.Unset); + // D.Faces[i].FirstHalfedge = P.PairHalfedge(P.Vertices[i].OutgoingHalfedge); + D.Faces[df].FirstHalfedge = P.Vertices[i].OutgoingHalfedge; + } + } + + // dual halfedge start V is primal AdjacentFace + // dual halfedge AdjacentFace is primal end V + // dual nextHE is primal's pair's prev + // dual prevHE is primal's next's pair + + // halfedge pairs stay the same + + for (int i = 0; i < P.Halfedges.Count; i++) + { + if ((P.Halfedges[i].AdjacentFace != -1) & (P.Halfedges[P.Halfedges.GetPairHalfedge(i)].AdjacentFace != -1)) + { + PlanktonHalfedge DualHE = PlanktonHalfedge.Unset; + PlanktonHalfedge PrimalHE = P.Halfedges[i]; + //DualHE.StartVertex = PrimalHE.AdjacentFace; + DualHE.StartVertex = P.Halfedges[P.Halfedges.GetPairHalfedge(i)].AdjacentFace; + + if (P.Vertices.NakedEdgeCount(PrimalHE.StartVertex) == 0) + { + //DualHE.AdjacentFace = P.Halfedges[P.PairHalfedge(i)].StartVertex; + DualHE.AdjacentFace = PrimalHE.StartVertex; + } + else { DualHE.AdjacentFace = -1; } + + //This will currently fail with open meshes... + //one option could be to build the dual with all halfedges, but mark some as dead + //if they connect to vertex -1 + //mark the 'external' faces all as -1 (the ones that are dual to boundary verts) + //then go through and if any next or prevs are dead hes then replace them with the next one around + //this needs to be done repeatedly until no further change + + //DualHE.NextHalfedge = P.Halfedges[P.PairHalfedge(i)].PrevHalfedge; + DualHE.NextHalfedge = P.Halfedges.GetPairHalfedge(PrimalHE.PrevHalfedge); + + //DualHE.PrevHalfedge = P.PairHalfedge(PrimalHE.NextHalfedge); + DualHE.PrevHalfedge = P.Halfedges[P.Halfedges.GetPairHalfedge(i)].NextHalfedge; + + D.Halfedges.Add(DualHE); + } + } + return D; + } + + public bool IsClosed() + { + for (int i = 0; i < this.Halfedges.Count; i++) + { + if (this.Halfedges[i].AdjacentFace < 0) + { + return false; + } + } + return true; + } + + /// + /// Truncates the vertices of a mesh. + /// + /// Optional parameter for the normalised distance along each edge to control the amount of truncation. + /// A new mesh, the result of the truncation. 
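Dual() now copes with open meshes through the fallback at the top of the method: face centres become dual vertices and only interior primal vertices produce dual faces, so the boundary ring is simply dropped. A small sketch of how the method might be exercised together with IsClosed() and Volume(); the reporting wrapper is illustrative:

using System;
using Plankton;

static class DualExample
{
    static void Report(PlanktonMesh mesh)
    {
        PlanktonMesh dual = mesh.Dual();
        Console.WriteLine("closed: {0}", mesh.IsClosed());
        Console.WriteLine("dual has {0} vertices / {1} faces", dual.Vertices.Count, dual.Faces.Count);

        if (mesh.IsClosed())
        {
            // Signed volume: sum of origin tetrahedra, 1/6 of the triple product per triangle.
            Console.WriteLine("volume: {0}", mesh.Volume());
        }
    }
}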
+ public PlanktonMesh TruncateVertices(float t = 1f/3) + { + // TODO: handle special cases (t = 0.0, t = 0.5, t > 0.5) + var tMesh = new PlanktonMesh(this); + + var vxyz = tMesh.Vertices.Select(v => v.ToXYZ()).ToArray(); + PlanktonXYZ v0, v1, v2; + int[] oh; + for (int i = 0; i < this.Vertices.Count; i++) + { + oh = this.Vertices.GetHalfedges(i); + tMesh.Vertices.TruncateVertex(i); + foreach (var h in oh) + { + v0 = vxyz[this.Halfedges[h].StartVertex]; + v1 = vxyz[this.Halfedges.EndVertex(h)]; + v2 = v0 + (v1 - v0) * t; + tMesh.Vertices.SetVertex(tMesh.Halfedges[h].StartVertex, v2.X, v2.Y, v2.Z); + } + } + + return tMesh; + } + + /* Hide for the time being to avoid confusion... + public void RefreshVertexNormals() + { + } + public void RefreshFaceNormals() + { + } + public void RefreshEdgeNormals() + { + } + */ + + /// + /// Removes any unreferenced objects from arrays, reindexes as needed and shrinks arrays to minimum required size. + /// + /// Thrown if halfedge count is odd after compaction. + /// Most likely caused by only marking one of the halfedges in a pair for deletion. + public void Compact() + { + // Compact vertices, faces and halfedges + this.Vertices.CompactHelper(); + this.Faces.CompactHelper(); + this.Halfedges.CompactHelper(); + } + + //dihedral angle for an edge + // + + //skeletonize - build a new mesh with 4 faces for each original edge + + #endregion + } +} diff --git a/src/Plankton/PlanktonVertex.cs b/src/Plankton/PlanktonVertex.cs index 617df25..3ec9d42 100644 --- a/src/Plankton/PlanktonVertex.cs +++ b/src/Plankton/PlanktonVertex.cs @@ -1,58 +1,60 @@ -using System; - -namespace Plankton -{ - /// - /// Represents a vertex in Plankton's halfedge mesh data structure. - /// - public class PlanktonVertex - { - public int OutgoingHalfedge; - - internal PlanktonVertex() - { - this.OutgoingHalfedge = -1; - } - - internal PlanktonVertex(float x, float y, float z) - { - OutgoingHalfedge = -1; - this.X = x; - this.Y = y; - this.Z = z; - } - - internal PlanktonVertex(double x, double y, double z) - : this((float) x, (float) y, (float) z) - { - // empty - } - - public float X { get; set; } - - public float Y { get; set; } - - public float Z { get; set; } - - public PlanktonXYZ ToXYZ() - { - return new PlanktonXYZ(this.X, this.Y, this.Z); - } - - /// - /// Gets an unset PlanktonVertex. Unset vertices have an outgoing halfedge index of -1. - /// - public static PlanktonVertex Unset - { - get { return new PlanktonVertex() { OutgoingHalfedge = -1 }; } - } - - /// - /// Whether or not the vertex is currently being referenced in the mesh. - /// - public bool IsUnused { get { return (this.OutgoingHalfedge < 0); } } - - [Obsolete()] - public bool Dead { get { return this.IsUnused; } } - } -} +using System; + +namespace Plankton +{ + /// + /// Represents a vertex in Plankton's halfedge mesh data structure. + /// + public class PlanktonVertex + { + public int OutgoingHalfedge; + public int Index; + + internal PlanktonVertex() + { + this.OutgoingHalfedge = -1; + + } + + internal PlanktonVertex(float x, float y, float z) + { + OutgoingHalfedge = -1; + this.X = x; + this.Y = y; + this.Z = z; + } + + internal PlanktonVertex(double x, double y, double z) + : this((float) x, (float) y, (float) z) + { + // empty + } + + public float X { get; set; } + + public float Y { get; set; } + + public float Z { get; set; } + + public PlanktonXYZ ToXYZ() + { + return new PlanktonXYZ(this.X, this.Y, this.Z); + } + + /// + /// Gets an unset PlanktonVertex. Unset vertices have an outgoing halfedge index of -1. 
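TruncateVertices works on a copy, so the input mesh is left untouched; t is the normalised distance along each outgoing edge at which the new corners are placed, with 1/3 as the default and values of 0.5 or more not yet handled. A one-line usage sketch:

using Plankton;

static class TruncateExample
{
    static PlanktonMesh TruncateLightly(PlanktonMesh mesh)
    {
        // Place the new corner vertices a quarter of the way along each edge.
        return mesh.TruncateVertices(0.25f);
    }
}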
+ /// + public static PlanktonVertex Unset + { + get { return new PlanktonVertex() { OutgoingHalfedge = -1 }; } + } + + /// + /// Whether or not the vertex is currently being referenced in the mesh. + /// + public bool IsUnused { get { return (this.OutgoingHalfedge < 0); } } + + [Obsolete()] + public bool Dead { get { return this.IsUnused; } } + } +} diff --git a/src/Plankton/PlanktonVertexList.cs b/src/Plankton/PlanktonVertexList.cs index 1614c55..9f412bd 100644 --- a/src/Plankton/PlanktonVertexList.cs +++ b/src/Plankton/PlanktonVertexList.cs @@ -1,595 +1,612 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - -namespace Plankton -{ - /// - /// Provides access to the vertices and Vertex related functionality of a Mesh. - /// - public class PlanktonVertexList : IEnumerable - { - private readonly PlanktonMesh _mesh; - private List _list; - - /// - /// Initializes a new instance of the class. - /// Should be called from the mesh constructor. - /// - /// The mesh to which this list of vertices belongs. - internal PlanktonVertexList(PlanktonMesh owner) - { - this._list = new List(); - this._mesh = owner; - } - - /// - /// Gets the number of vertices. - /// - public int Count - { - get - { - return this._list.Count; - } - } - - #region methods - #region vertex access - #region adding - /// - /// Adds a new vertex to the end of the Vertex list. - /// - /// Vertex to add. - /// The index of the newly added vertex. - internal int Add(PlanktonVertex vertex) - { - if (vertex == null) return -1; - this._list.Add(vertex); - return this.Count - 1; - } - - /// - /// Adds a new vertex to the end of the Vertex list. - /// - /// Vertex to add. - /// The index of the newly added vertex. - internal int Add(PlanktonXYZ vertex) - { - this._list.Add(new PlanktonVertex(vertex.X,vertex.Y,vertex.Z)); - return this.Count - 1; - } - - /// - /// Adds a new vertex to the end of the Vertex list. - /// - /// X component of new vertex coordinate. - /// Y component of new vertex coordinate. - /// Z component of new vertex coordinate. - /// The index of the newly added vertex. - public int Add(double x, double y, double z) - { - return this.Add(new PlanktonVertex(x, y, z)); - } - - /// - /// Adds a new vertex to the end of the Vertex list. - /// - /// X component of new vertex coordinate. - /// Y component of new vertex coordinate. - /// Z component of new vertex coordinate. - /// The index of the newly added vertex. - public int Add(float x, float y, float z) - { - return this.Add(new PlanktonVertex(x, y, z)); - } - #endregion - - /// - /// Adds a series of new vertices to the end of the vertex list. - /// - /// A list, an array or any enumerable set of . - /// Indices of the newly created vertices. - public int[] AddVertices(IEnumerable vertices) - { - return vertices.Select(v => this.Add(v)).ToArray(); - } - - /// - /// Returns the vertex at the given index. - /// - /// - /// Index of vertex to get. - /// Must be larger than or equal to zero and smaller than the Vertex Count of the mesh. - /// - /// The vertex at the given index. - public PlanktonVertex this[int index] - { - get - { - return this._list[index]; - } - internal set - { - this._list[index] = value; - } - } - - /// - /// Sets or adds a vertex to the Vertex List. - /// If [index] is less than [Count], the existing vertex at [index] will be modified. - /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. - /// If [index] is larger than [Count], the function will return false. 
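The new public Index field on PlanktonVertex is plain storage; nothing keeps it synchronised with the vertex's position in the list, so it is only as current as the last call to AssignVertexIndex (invoked by the copy constructor above, although its definition falls outside this excerpt). A hedged sketch of refreshing it after edits, assuming AssignVertexIndex is publicly callable like AssignHalfEdgeIndex:

using Plankton;

static class VertexIndexExample
{
    static void Renumber(PlanktonMesh mesh)
    {
        mesh.Compact();                      // drop unused vertices first
        mesh.Vertices.AssignVertexIndex();   // then refresh each vertex's Index field
    }
}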
- /// - /// Index of vertex to set. - /// X component of vertex location. - /// Y component of vertex location. - /// Z component of vertex location. - /// true on success, false on failure. - public bool SetVertex(int vertexIndex, float x, float y, float z) - { - if (vertexIndex >= 0 && vertexIndex < _list.Count) - { - var v = this._list[vertexIndex]; - v.X = x; - v.Y = y; - v.Z = z; - } - else if (vertexIndex == _list.Count) - { - this.Add(x, y, z); - } - else { return false; } - - return true; - } - - /// - /// Sets or adds a vertex to the Vertex List. - /// If [index] is less than [Count], the existing vertex at [index] will be modified. - /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. - /// If [index] is larger than [Count], the function will return false. - /// - /// Index of vertex to set. - /// X component of vertex location. - /// Y component of vertex location. - /// Z component of vertex location. - /// true on success, false on failure. - public bool SetVertex(int vertexIndex, double x, double y, double z) - { - if (vertexIndex >= 0 && vertexIndex < _list.Count) - { - var v = this._list[vertexIndex]; - v.X = (float)x; - v.Y = (float)y; - v.Z = (float)z; - } - else if (vertexIndex == _list.Count) - { - this.Add(x, y, z); - } - else { return false; } - - return true; - } - #endregion - - /// - /// Helper method to remove dead vertices from the list, re-index and compact. - /// - internal int CompactHelper() - { - int marker = 0; // Location where the current vertex should be moved to - - // Run through all the vertices - for (int iter = 0; iter < _list.Count; iter++) - { - // If vertex is alive, check if we need to shuffle it down the list - if (!_list[iter].IsUnused) - { - if (marker < iter) - { - // Room to shuffle. Copy current vertex to marked slot. - _list[marker] = _list[iter]; - - // Update all halfedges which start here - int first = _list[marker].OutgoingHalfedge; - foreach (int h in _mesh.Halfedges.GetVertexCirculator(first)) - { - _mesh.Halfedges[h].StartVertex = marker; - } - } - marker++; // That spot's filled. Advance the marker. - } - } - - // Trim list down to new size - if (marker < _list.Count) { _list.RemoveRange(marker, _list.Count - marker); } - - return _list.Count - marker; - } - - /// - /// Removes all vertices that are currently not used by the Halfedge list. - /// - /// The number of unused vertices that were removed. - public int CullUnused() - { - return this.CompactHelper(); - } - - #region traversals - /// - /// Traverses the halfedge indices which originate from a vertex. - /// - /// A vertex index. - /// An enumerable of halfedge indices incident to the specified vertex. - /// Ordered clockwise around the vertex. - [Obsolete("GetHalfedgesCirculator(int) is deprecated, please use" + - "Halfedges.GetVertexCirculator(int) instead.")] - public IEnumerable GetHalfedgesCirculator(int v) - { - int he_first = this[v].OutgoingHalfedge; - if (he_first < 0) yield break; // vertex has no connectivity, exit - int he_current = he_first; - var hs = _mesh.Halfedges; - do - { - yield return he_current; - he_current = hs[hs.GetPairHalfedge(he_current)].NextHalfedge; - } - while (he_current != he_first); - } - - /// - /// Traverses the halfedge indices which originate from a vertex. - /// - /// A vertex index. - /// A halfedge index. Halfedge must start at the specified vertex. - /// An enumerable of halfedge indices incident to the specified vertex. - /// Ordered clockwise around the vertex. 
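SetVertex is intentionally lenient: an index below Count overwrites the existing vertex, an index equal to Count appends a new one, and anything beyond that returns false instead of throwing. A small sketch of the set-or-append pattern; the wrapper is illustrative:

using Plankton;

static class SetVertexExample
{
    static void MoveOrAppend(PlanktonMesh mesh, int i, double x, double y, double z)
    {
        if (!mesh.Vertices.SetVertex(i, x, y, z))
        {
            // Index was beyond the end of the list; fall back to a plain Add.
            mesh.Vertices.Add(x, y, z);
        }
    }
}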
- /// The returned enumerable will start with the specified halfedge. - /// - /// The specified halfedge does not originate from the specified vertex. - /// - [Obsolete("GetHalfedgesCirculator(int,int) is deprecated, please use" + - "Halfedges.GetVertexCirculator(int) instead.")] - public IEnumerable GetHalfedgesCirculator(int v, int first) - { - if (_mesh.Halfedges[first].StartVertex != v) - throw new ArgumentOutOfRangeException("Halfedge does not start at vertex."); - // TODO: The code below is the same as above. - // Can we refactor (without extra, unnecessary iterators)? - int h = first; - var hs = _mesh.Halfedges; - do - { - yield return h; - h = hs[hs.GetPairHalfedge(h)].NextHalfedge; - } - while (h != first); - } - #endregion - - #region adjacency queries - /// - /// Gets the halfedges which originate from a vertex. - /// - /// A vertex index. - /// The indices of halfedges incident to a particular vertex. - /// Ordered clockwise around the vertex. - public int[] GetHalfedges(int v) - { - return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge).ToArray(); - } - - /// - /// Gets the halfedges which end at a vertex. - /// - /// A vertex index. - /// The opposing halfedge for each returned by . - /// Ordered clockwise around the vertex. - public int[] GetIncomingHalfedges(int v) - { - return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge) - .Select(h => _mesh.Halfedges.GetPairHalfedge(h)).ToArray(); - } - - /// - /// Gets vertex neighbours (a.k.a. 1-ring). - /// - /// A vertex index. - /// An array of vertex indices incident to the specified vertex. - /// Ordered clockwise around the vertex. - public int[] GetVertexNeighbours(int v) - { - var hs = _mesh.Halfedges; - return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge) - .Select(h => hs[hs.GetPairHalfedge(h)].StartVertex).ToArray(); - } - - /// - /// Gets faces incident to a vertex. - /// - /// A vertex index. - /// An array of face indices incident to the specified vertex. - /// Ordered clockwise around the vertex - public int[] GetVertexFaces(int v) - { - return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge) - .Select(h => _mesh.Halfedges[h].AdjacentFace).ToArray(); - } - - /// - /// Gets the first incoming halfedge for a vertex. - /// - /// A vertex index. - /// The index of the halfedge paired with the specified vertex's . - public int GetIncomingHalfedge(int v) - { - return _mesh.Halfedges.GetPairHalfedge(this[v].OutgoingHalfedge); - } - #endregion - - /// - /// Gets the number of naked edges incident to this vertex. - /// - /// A vertex index. - /// The number of incident halfedges which lie on a boundary. - public int NakedEdgeCount(int v) - { - int nakedCount = 0; - var hs = _mesh.Halfedges; - foreach (int i in _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge)) - { - if (hs[i].AdjacentFace == -1 || hs[hs.GetPairHalfedge(i)].AdjacentFace == -1) - nakedCount++; - } - return nakedCount; - } - - /// - /// Gets the number of edges incident to this vertex. - /// - /// A vertex index. - /// The number of incident edges. - public int GetValence(int v) - { - int h = this[v].OutgoingHalfedge; - return _mesh.Halfedges.GetVertexCirculator(h).Count(); - } - - /// - /// A vertex is on a boundary if its outgoing halfedge has no adjacent face. - /// - /// The index of a vertex. - /// true if the specified vertex is on a boundary; otherwise, false. - /// Also returns true if the vertex is unused (i.e. no outgoing halfedge). 
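The adjacency helpers all walk the same clockwise circulator, so GetHalfedges, GetVertexNeighbours and GetVertexFaces return arrays that line up element for element around the vertex. A sketch that combines GetValence and NakedEdgeCount to flag irregular interior vertices; the idea of a caller-supplied regular valence, and the method name, are illustrative:

using System.Collections.Generic;
using Plankton;

static class ValenceExample
{
    static List<int> IrregularInteriorVertices(PlanktonMesh mesh, int regularValence)
    {
        var result = new List<int>();
        for (int v = 0; v < mesh.Vertices.Count; v++)
        {
            if (mesh.Vertices[v].IsUnused) { continue; }
            if (mesh.Vertices.NakedEdgeCount(v) > 0) { continue; }   // skip boundary vertices
            if (mesh.Vertices.GetValence(v) != regularValence) { result.Add(v); }
        }
        return result;
    }
}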
- public bool IsBoundary(int index) - { - int h = this[index].OutgoingHalfedge; - return (h < -1 || _mesh.Halfedges[h].AdjacentFace == -1); - } - - /// - /// Gets the normal vector at a vertex. - /// - /// The index of a vertex. - /// The area weighted vertex normal. - public PlanktonXYZ GetNormal(int index) - { - PlanktonXYZ vertex = this[index].ToXYZ(); - PlanktonXYZ normal = new PlanktonXYZ(); - - var ring = this.GetVertexNeighbours(index); - int n = ring.Length; - - for (int i = 0; i < n-1; i++) - { - normal += PlanktonXYZ.CrossProduct( - this[ring[i]].ToXYZ() - vertex, - this[ring[i+1]].ToXYZ() - vertex); - } - - if (this.IsBoundary(index) == false) - { - normal += PlanktonXYZ.CrossProduct( - this[n-1].ToXYZ() - vertex, - this[0].ToXYZ() - vertex); - } - - return normal * (-1.0f / normal.Length); // return unit vector - } - - /// - /// Gets the normal vectors for all vertices in the mesh. - /// - /// The area weighted vertex normals of all vertices in the mesh. - /// - /// This will be accurate at the time of calling but will quickly - /// become outdated if you start fiddling with the mesh. - /// - public PlanktonXYZ[] GetNormals() - { - return Enumerable.Range(0, this.Count).Select(i => this.GetNormal(i)).ToArray(); - } - - /// - /// Gets the positions of all vertices. - /// - /// The positions of all vertices in the mesh. - public PlanktonXYZ[] GetPositions() - { - return Enumerable.Range(0, this.Count).Select(i => this[i].ToXYZ()).ToArray(); - } - - #region Euler operators - /// - /// Merges two vertices by collapsing the pair of halfedges between them. - /// - /// - /// The index of a halfedge between the two vertices to be merged. - /// The starting vertex of this halfedge will be retained. - /// The successor of around its vertex, or -1 on failure. - /// The invariant mesh.Vertices.MergeVertices(mesh.Vertices.SplitVertex(a, b)) will return a, - /// leaving the mesh unchanged. - public int MergeVertices(int halfedge) - { - return _mesh.Halfedges.CollapseEdge(halfedge); - - } - - /// - /// Splits the vertex into two, joined by a new pair of halfedges. - /// - /// The index of a halfedge which starts at the vertex to split. - /// The index of a second halfedge which starts at the vertex to split. - /// The new halfedge which starts at the existing vertex. - /// After the split, the halfedge will be starting at the newly added vertex. - public int SplitVertex(int first, int second) - { - var hs = _mesh.Halfedges; - // Check that both halfedges start at the same vertex - int v_old = hs[first].StartVertex; - if (v_old != hs[second].StartVertex) { return -1; } // TODO: return ArgumentException instead? - - // Create a copy of the existing vertex (user can move it afterwards if needs be) - int v_new = this.Add(this[v_old].ToXYZ()); // copy vertex by converting to XYZ and back - - // Go around outgoing halfedges, from 'second' to just before 'first' - // Set start vertex to new vertex - bool reset_v_old = false; - foreach (int h in hs.GetVertexCirculator(second)) - { - if (h == first) { break; } - hs[h].StartVertex = v_new; - // If new vertex has no outgoing yet and current he is naked... 
- if (this[v_new].OutgoingHalfedge == -1 && hs[h].AdjacentFace == -1) - this[v_new].OutgoingHalfedge = h; - // Also check whether existing vert's he is now incident to new one - if (h == this[v_old].OutgoingHalfedge) { reset_v_old = true; } - } - // If no naked halfedges, just use 'second' - if (this[v_new].OutgoingHalfedge == -1) { this[v_new].OutgoingHalfedge = second; } - - // Add the new pair of halfedges from old vertex to new - int h_new = hs.AddPair(v_old, v_new, hs[second].AdjacentFace); - int h_new_pair = hs.GetPairHalfedge(h_new); - hs[h_new_pair].AdjacentFace = hs[first].AdjacentFace; - - // Link new pair into mesh - hs.MakeConsecutive(hs[first].PrevHalfedge, h_new_pair); - hs.MakeConsecutive(h_new_pair, first); - hs.MakeConsecutive(hs[second].PrevHalfedge, h_new); - hs.MakeConsecutive(h_new, second); - - // Re-set existing vertex's outgoing halfedge, if necessary - if (reset_v_old) - { - this[v_old].OutgoingHalfedge = h_new; - foreach (int h in hs.GetVertexCirculator(h_new)) - { - if (hs[h].AdjacentFace == -1) { this[v_old].OutgoingHalfedge = h; } - } - } - - // return the new vertex which starts at the existing vertex - return h_new; - } - - /// - /// Erases a vertex and all incident halfedges by merging its incident faces. - /// - /// The index of a halfedge which starts at the vertex to erase. - /// The retained face will be the one adjacent to this halfedge. - /// The successor of around its original face. - public int EraseCenterVertex(int halfedgeIndex) - { - int vertexIndex = _mesh.Halfedges[halfedgeIndex].StartVertex; - - // Check that the vertex is completely surrounded by faces - if (this.IsBoundary(vertexIndex)) - throw new ArgumentException("Center vertex must not be on a boundary"); - - // Get outgoing halfedges around vertex, starting with specified halfedge - int[] vertexHalfedges = _mesh.Halfedges.GetVertexCirculator(halfedgeIndex).ToArray(); - - // Check for 2-valent vertices in the 1-ring (no antennas) - int v; - foreach (int h in vertexHalfedges) - { - v = _mesh.Halfedges.EndVertex(h); - if (this.GetHalfedges(v).Length < 3) - throw new ArgumentException("Vertex in 1-ring is 2-valent"); - } - - // Store face to keep and set its first halfedge - int faceIndex = _mesh.Halfedges[halfedgeIndex].AdjacentFace; - int firstHalfedge = _mesh.Halfedges[halfedgeIndex].NextHalfedge; - _mesh.Faces[faceIndex].FirstHalfedge = firstHalfedge; - - // Remove incident halfedges and mark faces for deletion (except first face) - _mesh.Halfedges.RemovePairHelper(vertexHalfedges[0]); - for (int i = 1; i < vertexHalfedges.Length; i++) - { - _mesh.Faces[_mesh.Halfedges[vertexHalfedges[i]].AdjacentFace] = PlanktonFace.Unset; - _mesh.Halfedges.RemovePairHelper(vertexHalfedges[i]); - } - - // Set adjacent face for all halfedges in hole - foreach (int h in _mesh.Halfedges.GetFaceCirculator(firstHalfedge)) - { - _mesh.Halfedges[h].AdjacentFace = faceIndex; - } - - // Mark center vertex for deletion - this[vertexIndex] = PlanktonVertex.Unset; - - return _mesh.Faces[faceIndex].FirstHalfedge; - } - #endregion - - /// - /// Truncates a vertex by creating a face with vertices on each of the outgoing halfedges. - /// - /// The index of a vertex. - /// The index of the newly created face. 
- public int TruncateVertex(int v) - { - var hs = this.GetHalfedges(v); - - // set h_new and move original vertex - int h_new = hs[0]; - - // circulate outgoing halfedges (clockwise, skip first) - for (int i = 1; i < hs.Length; i++) - { - // split vertex - int h_tmp = this.SplitVertex(hs[i], h_new); - h_new = h_tmp; // tidy-up if 'vs' is removed - } - - // split face to create new truncated face - int splitH = this._mesh.Faces.SplitFace(hs[0], h_new); - - return this._mesh.Halfedges[this._mesh.Halfedges.GetPairHalfedge(splitH)].AdjacentFace; - } - #endregion - - #region IEnumerable implementation - /// - /// Gets an enumerator that yields all faces in this collection. - /// - /// An enumerator. - public IEnumerator GetEnumerator() - { - return this._list.GetEnumerator(); - } - IEnumerator IEnumerable.GetEnumerator() - { - return this.GetEnumerator(); - } - #endregion - } -} +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace Plankton +{ + /// + /// Provides access to the vertices and Vertex related functionality of a Mesh. + /// + public class PlanktonVertexList : IEnumerable + { + private readonly PlanktonMesh _mesh; + private List _list; + + /// + /// Initializes a new instance of the class. + /// Should be called from the mesh constructor. + /// + /// The mesh to which this list of vertices belongs. + internal PlanktonVertexList(PlanktonMesh owner) + { + this._list = new List(); + this._mesh = owner; + + } + + /// + /// Gets the number of vertices. + /// + public int Count + { + get + { + return this._list.Count; + } + } + + #region methods + #region vertex access + #region adding + /// + /// Adds a new vertex to the end of the Vertex list. + /// + /// Vertex to add. + /// The index of the newly added vertex. + internal int Add(PlanktonVertex vertex) + { + if (vertex == null) return -1; + this._list.Add(vertex); + return this.Count - 1; + } + + /// + /// Adds a new vertex to the end of the Vertex list. + /// + /// Vertex to add. + /// The index of the newly added vertex. + internal int Add(PlanktonXYZ vertex) + { + this._list.Add(new PlanktonVertex(vertex.X,vertex.Y,vertex.Z)); + return this.Count - 1; + } + + /// + /// Adds a new vertex to the end of the Vertex list. + /// + /// X component of new vertex coordinate. + /// Y component of new vertex coordinate. + /// Z component of new vertex coordinate. + /// The index of the newly added vertex. + public int Add(double x, double y, double z) + { + return this.Add(new PlanktonVertex(x, y, z)); + } + + /// + /// Adds a new vertex to the end of the Vertex list. + /// + /// X component of new vertex coordinate. + /// Y component of new vertex coordinate. + /// Z component of new vertex coordinate. + /// The index of the newly added vertex. + public int Add(float x, float y, float z) + { + return this.Add(new PlanktonVertex(x, y, z)); + } + #endregion + + /// + /// Adds a series of new vertices to the end of the vertex list. + /// + /// A list, an array or any enumerable set of . + /// Indices of the newly created vertices. + public int[] AddVertices(IEnumerable vertices) + { + return vertices.Select(v => this.Add(v)).ToArray(); + } + + /// + /// Returns the vertex at the given index. + /// + /// + /// Index of vertex to get. + /// Must be larger than or equal to zero and smaller than the Vertex Count of the mesh. + /// + /// The vertex at the given index. 
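A minimal usage sketch of the Add overloads above (illustrative only, not part of this commit): it builds a single quad from four freshly added vertices and reads one back through the indexer. The Faces.AddFace call taking vertex indices is an assumption based on the face list's public API elsewhere in the library.

using Plankton;

static class VertexAddSketch
{
    // Builds one quad from four freshly added vertices; Faces.AddFace(params int[]) is assumed.
    internal static PlanktonMesh SingleQuad()
    {
        var mesh = new PlanktonMesh();
        int a = mesh.Vertices.Add(0.0, 0.0, 0.0);
        int b = mesh.Vertices.Add(1.0, 0.0, 0.0);
        int c = mesh.Vertices.Add(1.0, 1.0, 0.0);
        int d = mesh.Vertices.Add(0.0, 1.0, 0.0);
        mesh.Faces.AddFace(a, b, c, d);   // anticlockwise loop of the indices returned by Add
        return mesh;                      // mesh.Vertices.Count == 4; mesh.Vertices[a] is the first vertex
    }
}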
+ public PlanktonVertex this[int index] + { + get + { + return this._list[index]; + } + internal set + { + this._list[index] = value; + } + } + + /// + /// Sets or adds a vertex to the Vertex List. + /// If [index] is less than [Count], the existing vertex at [index] will be modified. + /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. + /// If [index] is larger than [Count], the function will return false. + /// + /// Index of vertex to set. + /// X component of vertex location. + /// Y component of vertex location. + /// Z component of vertex location. + /// true on success, false on failure. + public bool SetVertex(int vertexIndex, float x, float y, float z) + { + if (vertexIndex >= 0 && vertexIndex < _list.Count) + { + var v = this._list[vertexIndex]; + v.X = x; + v.Y = y; + v.Z = z; + } + else if (vertexIndex == _list.Count) + { + this.Add(x, y, z); + } + else { return false; } + + return true; + } + + /// + /// Sets or adds a vertex to the Vertex List. + /// If [index] is less than [Count], the existing vertex at [index] will be modified. + /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. + /// If [index] is larger than [Count], the function will return false. + /// + /// Index of vertex to set. + /// X component of vertex location. + /// Y component of vertex location. + /// Z component of vertex location. + /// true on success, false on failure. + public bool SetVertex(int vertexIndex, double x, double y, double z) + { + if (vertexIndex >= 0 && vertexIndex < _list.Count) + { + var v = this._list[vertexIndex]; + v.X = (float)x; + v.Y = (float)y; + v.Z = (float)z; + } + else if (vertexIndex == _list.Count) + { + this.Add(x, y, z); + } + else { return false; } + + return true; + } + #endregion + + /// + /// Helper method to remove dead vertices from the list, re-index and compact. + /// + internal int CompactHelper() + { + int marker = 0; // Location where the current vertex should be moved to + + // Run through all the vertices + for (int iter = 0; iter < _list.Count; iter++) + { + // If vertex is alive, check if we need to shuffle it down the list + if (!_list[iter].IsUnused) + { + if (marker < iter) + { + // Room to shuffle. Copy current vertex to marked slot. + _list[marker] = _list[iter]; + + // Update all halfedges which start here + int first = _list[marker].OutgoingHalfedge; + foreach (int h in _mesh.Halfedges.GetVertexCirculator(first)) + { + _mesh.Halfedges[h].StartVertex = marker; + } + } + marker++; // That spot's filled. Advance the marker. + } + } + + // Trim list down to new size + if (marker < _list.Count) { _list.RemoveRange(marker, _list.Count - marker); } + + return _list.Count - marker; + } + + /// + /// Removes all vertices that are currently not used by the Halfedge list. + /// + /// The number of unused vertices that were removed. + public int CullUnused() + { + return this.CompactHelper(); + } + + // !!! + #region traversals + /// + /// Traverses the halfedge indices which originate from a vertex. + /// + /// A vertex index. + /// An enumerable of halfedge indices incident to the specified vertex. + /// Ordered clockwise around the vertex. 
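The set-or-append contract of SetVertex and the compaction pass above can be exercised directly; a short sketch, assuming a mesh that already contains at least one vertex (illustrative only, not part of this commit).

using Plankton;

static class SetVertexSketch
{
    internal static void Run(PlanktonMesh mesh)
    {
        int n = mesh.Vertices.Count;
        bool moved    = mesh.Vertices.SetVertex(0, 1.0, 2.0, 3.0);     // index < Count: vertex 0 is modified, returns true
        bool appended = mesh.Vertices.SetVertex(n, 0.0, 0.0, 0.0);     // index == Count: a new vertex is appended, returns true
        bool rejected = mesh.Vertices.SetVertex(n + 2, 0.0, 0.0, 0.0); // index > Count: nothing happens, returns false
        mesh.Vertices.CullUnused();   // drops vertices no halfedge starts at, including the one just appended
    }
}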
+ [Obsolete("GetHalfedgesCirculator(int) is deprecated, please use" + + "Halfedges.GetVertexCirculator(int) instead.")] + public IEnumerable GetHalfedgesCirculator(int v) + { + int he_first = this[v].OutgoingHalfedge; + if (he_first < 0) yield break; // vertex has no connectivity, exit + int he_current = he_first; + var hs = _mesh.Halfedges; + do + { + yield return he_current; + he_current = hs[hs.GetPairHalfedge(he_current)].NextHalfedge; + } + while (he_current != he_first); + } + + /// + /// Traverses the halfedge indices which originate from a vertex. + /// + /// A vertex index. + /// A halfedge index. Halfedge must start at the specified vertex. + /// An enumerable of halfedge indices incident to the specified vertex. + /// Ordered clockwise around the vertex. + /// The returned enumerable will start with the specified halfedge. + /// + /// The specified halfedge does not originate from the specified vertex. + /// + [Obsolete("GetHalfedgesCirculator(int,int) is deprecated, please use" + + "Halfedges.GetVertexCirculator(int) instead.")] + public IEnumerable GetHalfedgesCirculator(int v, int first) + { + if (_mesh.Halfedges[first].StartVertex != v) + throw new ArgumentOutOfRangeException("Halfedge does not start at vertex."); + // TODO: The code below is the same as above. + // Can we refactor (without extra, unnecessary iterators)? + int h = first; + var hs = _mesh.Halfedges; + do + { + yield return h; + h = hs[hs.GetPairHalfedge(h)].NextHalfedge; + } + while (h != first); + } + #endregion + + #region adjacency queries + /// + /// Gets the halfedges which originate from a vertex. + /// + /// A vertex index. + /// The indices of halfedges incident to a particular vertex. + /// Ordered clockwise around the vertex. + public int[] GetHalfedges(int v) + { + return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge).ToArray(); + } + + /// + /// Gets the halfedges which end at a vertex. + /// + /// A vertex index. + /// The opposing halfedge for each returned by . + /// Ordered clockwise around the vertex. + public int[] GetIncomingHalfedges(int v) + { + return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge) + .Select(h => _mesh.Halfedges.GetPairHalfedge(h)).ToArray(); + } + + /// + /// Gets vertex neighbours (a.k.a. 1-ring). + /// + /// A vertex index. + /// An array of vertex indices incident to the specified vertex. + /// Ordered clockwise around the vertex. + public int[] GetVertexNeighbours(int v) + { + var hs = _mesh.Halfedges; + return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge) + .Select(h => hs[hs.GetPairHalfedge(h)].StartVertex).ToArray(); + } + + /// + /// Gets faces incident to a vertex. + /// + /// A vertex index. + /// An array of face indices incident to the specified vertex. + /// Ordered clockwise around the vertex + public int[] GetVertexFaces(int v) + { + return _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge) + .Select(h => _mesh.Halfedges[h].AdjacentFace).ToArray(); + } + + /// + /// Gets the first incoming halfedge for a vertex. + /// + /// A vertex index. + /// The index of the halfedge paired with the specified vertex's . + public int GetIncomingHalfedge(int v) + { + return _mesh.Halfedges.GetPairHalfedge(this[v].OutgoingHalfedge); + } + #endregion + + /// + /// Gets the number of naked edges incident to this vertex. + /// + /// A vertex index. + /// The number of incident halfedges which lie on a boundary. 
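The clockwise walk behind both obsolete circulators above, and behind Halfedges.GetVertexCirculator, is simply "cross to the pair, then take its successor". A standalone sketch using only the accessors that appear in this file (illustrative only, not part of this commit):

using System.Collections.Generic;
using Plankton;

static class CirculatorSketch
{
    // Yields the outgoing halfedges of vertex v, clockwise, starting at its OutgoingHalfedge.
    internal static IEnumerable<int> OutgoingHalfedges(PlanktonMesh mesh, int v)
    {
        int first = mesh.Vertices[v].OutgoingHalfedge;
        if (first < 0) yield break;   // unused vertex: nothing to traverse
        int h = first;
        do
        {
            yield return h;
            // step clockwise: pair of the current halfedge, then that pair's successor
            h = mesh.Halfedges[mesh.Halfedges.GetPairHalfedge(h)].NextHalfedge;
        } while (h != first);
    }
}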
+ public int NakedEdgeCount(int v) + { + int nakedCount = 0; + var hs = _mesh.Halfedges; + foreach (int i in _mesh.Halfedges.GetVertexCirculator(this[v].OutgoingHalfedge)) + { + if (hs[i].AdjacentFace == -1 || hs[hs.GetPairHalfedge(i)].AdjacentFace == -1) + nakedCount++; + } + return nakedCount; + } + + /// + /// Gets the number of edges incident to this vertex. + /// + /// A vertex index. + /// The number of incident edges. + public int GetValence(int v) + { + int h = this[v].OutgoingHalfedge; + return _mesh.Halfedges.GetVertexCirculator(h).Count(); + } + + /// + /// A vertex is on a boundary if its outgoing halfedge has no adjacent face. + /// + /// The index of a vertex. + /// true if the specified vertex is on a boundary; otherwise, false. + /// Also returns true if the vertex is unused (i.e. no outgoing halfedge). + public bool IsBoundary(int index) + { + int h = this[index].OutgoingHalfedge; + return (h < -1 || _mesh.Halfedges[h].AdjacentFace == -1); + } + + /// + /// Gets the normal vector at a vertex. + /// + /// The index of a vertex. + /// The area weighted vertex normal. + public PlanktonXYZ GetNormal(int index) + { + PlanktonXYZ vertex = this[index].ToXYZ(); + PlanktonXYZ normal = new PlanktonXYZ(); + + var ring = this.GetVertexNeighbours(index); + int n = ring.Length; + + for (int i = 0; i < n-1; i++) + { + normal += PlanktonXYZ.CrossProduct( + this[ring[i]].ToXYZ() - vertex, + this[ring[i+1]].ToXYZ() - vertex); + } + + if (this.IsBoundary(index) == false) + { + normal += PlanktonXYZ.CrossProduct( + this[n-1].ToXYZ() - vertex, + this[0].ToXYZ() - vertex); + } + + return normal * (-1.0f / normal.Length); // return unit vector + } + + /// + /// Gets the normal vectors for all vertices in the mesh. + /// + /// The area weighted vertex normals of all vertices in the mesh. + /// + /// This will be accurate at the time of calling but will quickly + /// become outdated if you start fiddling with the mesh. + /// + public PlanktonXYZ[] GetNormals() + { + return Enumerable.Range(0, this.Count).Select(i => this.GetNormal(i)).ToArray(); + } + + /// + /// Gets the positions of all vertices. + /// + /// The positions of all vertices in the mesh. + public PlanktonXYZ[] GetPositions() + { + return Enumerable.Range(0, this.Count).Select(i => this[i].ToXYZ()).ToArray(); + } + + #region Euler operators + /// + /// Merges two vertices by collapsing the pair of halfedges between them. + /// + /// + /// The index of a halfedge between the two vertices to be merged. + /// The starting vertex of this halfedge will be retained. + /// The successor of around its vertex, or -1 on failure. + /// The invariant mesh.Vertices.MergeVertices(mesh.Vertices.SplitVertex(a, b)) will return a, + /// leaving the mesh unchanged. + public int MergeVertices(int halfedge) + { + return _mesh.Halfedges.CollapseEdge(halfedge); + + } + + /// + /// Splits the vertex into two, joined by a new pair of halfedges. + /// + /// The index of a halfedge which starts at the vertex to split. + /// The index of a second halfedge which starts at the vertex to split. + /// The new halfedge which starts at the existing vertex. + /// After the split, the halfedge will be starting at the newly added vertex. + public int SplitVertex(int first, int second) + { + var hs = _mesh.Halfedges; + // Check that both halfedges start at the same vertex + int v_old = hs[first].StartVertex; + if (v_old != hs[second].StartVertex) { return -1; } // TODO: return ArgumentException instead? 
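For reference, the area-weighted accumulation used by GetNormal above can be written against raw positions alone; a minimal sketch over an ordered 1-ring, using only the PlanktonXYZ operators that appear in this file (illustrative only, not part of this commit).

using Plankton;

static class NormalSketch
{
    // Area-weighted normal from a centre point and its ordered 1-ring (at least two neighbours assumed).
    // For an interior vertex the ring is treated as closed, i.e. the last neighbour connects back to the first.
    internal static PlanktonXYZ RingNormal(PlanktonXYZ centre, PlanktonXYZ[] ring, bool closed)
    {
        var normal = new PlanktonXYZ();
        for (int i = 0; i < ring.Length - 1; i++)
            normal += PlanktonXYZ.CrossProduct(ring[i] - centre, ring[i + 1] - centre);
        if (closed)   // interior vertex: close the fan with the wrapping pair
            normal += PlanktonXYZ.CrossProduct(ring[ring.Length - 1] - centre, ring[0] - centre);
        return normal * (-1.0f / normal.Length);   // unit length, same orientation convention as GetNormal
    }
}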
+ + // Create a copy of the existing vertex (user can move it afterwards if needs be) + int v_new = this.Add(this[v_old].ToXYZ()); // copy vertex by converting to XYZ and back + + // Go around outgoing halfedges, from 'second' to just before 'first' + // Set start vertex to new vertex + bool reset_v_old = false; + foreach (int h in hs.GetVertexCirculator(second)) + { + if (h == first) { break; } + hs[h].StartVertex = v_new; + // If new vertex has no outgoing yet and current he is naked... + if (this[v_new].OutgoingHalfedge == -1 && hs[h].AdjacentFace == -1) + this[v_new].OutgoingHalfedge = h; + // Also check whether existing vert's he is now incident to new one + if (h == this[v_old].OutgoingHalfedge) { reset_v_old = true; } + } + // If no naked halfedges, just use 'second' + if (this[v_new].OutgoingHalfedge == -1) { this[v_new].OutgoingHalfedge = second; } + + // Add the new pair of halfedges from old vertex to new + int h_new = hs.AddPair(v_old, v_new, hs[second].AdjacentFace); + int h_new_pair = hs.GetPairHalfedge(h_new); + hs[h_new_pair].AdjacentFace = hs[first].AdjacentFace; + + // Link new pair into mesh + hs.MakeConsecutive(hs[first].PrevHalfedge, h_new_pair); + hs.MakeConsecutive(h_new_pair, first); + hs.MakeConsecutive(hs[second].PrevHalfedge, h_new); + hs.MakeConsecutive(h_new, second); + + // Re-set existing vertex's outgoing halfedge, if necessary + if (reset_v_old) + { + this[v_old].OutgoingHalfedge = h_new; + foreach (int h in hs.GetVertexCirculator(h_new)) + { + if (hs[h].AdjacentFace == -1) { this[v_old].OutgoingHalfedge = h; } + } + } + + // return the new vertex which starts at the existing vertex + return h_new; + } + + /// + /// Erases a vertex and all incident halfedges by merging its incident faces. + /// + /// The index of a halfedge which starts at the vertex to erase. + /// The retained face will be the one adjacent to this halfedge. + /// The successor of around its original face. 
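The invariant documented for MergeVertices above can be stated directly in code; a sketch assuming a and b are two outgoing halfedges of the same vertex (illustrative only, not part of this commit):

using Plankton;

static class SplitMergeSketch
{
    // SplitVertex followed by MergeVertices round-trips, per the invariant documented above:
    // collapsing the halfedge returned by the split restores the original mesh and returns a.
    internal static bool RoundTrips(PlanktonMesh mesh, int a, int b)
    {
        int hNew = mesh.Vertices.SplitVertex(a, b);    // new halfedge starting at the existing vertex
        int back = mesh.Vertices.MergeVertices(hNew);  // collapse the new pair again
        return back == a;                              // expected to hold when a and b share a start vertex
    }
}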
+ public int EraseCenterVertex(int halfedgeIndex) + { + int vertexIndex = _mesh.Halfedges[halfedgeIndex].StartVertex; + + // Check that the vertex is completely surrounded by faces + if (this.IsBoundary(vertexIndex)) + throw new ArgumentException("Center vertex must not be on a boundary"); + + // Get outgoing halfedges around vertex, starting with specified halfedge + int[] vertexHalfedges = _mesh.Halfedges.GetVertexCirculator(halfedgeIndex).ToArray(); + + // Check for 2-valent vertices in the 1-ring (no antennas) + int v; + foreach (int h in vertexHalfedges) + { + v = _mesh.Halfedges.EndVertex(h); + if (this.GetHalfedges(v).Length < 3) + throw new ArgumentException("Vertex in 1-ring is 2-valent"); + } + + // Store face to keep and set its first halfedge + int faceIndex = _mesh.Halfedges[halfedgeIndex].AdjacentFace; + int firstHalfedge = _mesh.Halfedges[halfedgeIndex].NextHalfedge; + _mesh.Faces[faceIndex].FirstHalfedge = firstHalfedge; + + // Remove incident halfedges and mark faces for deletion (except first face) + _mesh.Halfedges.RemovePairHelper(vertexHalfedges[0]); + for (int i = 1; i < vertexHalfedges.Length; i++) + { + _mesh.Faces[_mesh.Halfedges[vertexHalfedges[i]].AdjacentFace] = PlanktonFace.Unset; + _mesh.Halfedges.RemovePairHelper(vertexHalfedges[i]); + } + + // Set adjacent face for all halfedges in hole + foreach (int h in _mesh.Halfedges.GetFaceCirculator(firstHalfedge)) + { + _mesh.Halfedges[h].AdjacentFace = faceIndex; + } + + // Mark center vertex for deletion + this[vertexIndex] = PlanktonVertex.Unset; + + return _mesh.Faces[faceIndex].FirstHalfedge; + } + #endregion + + /// + /// Truncates a vertex by creating a face with vertices on each of the outgoing halfedges. + /// + /// The index of a vertex. + /// The index of the newly created face. + public int TruncateVertex(int v) + { + var hs = this.GetHalfedges(v); + + // set h_new and move original vertex + int h_new = hs[0]; + + // circulate outgoing halfedges (clockwise, skip first) + for (int i = 1; i < hs.Length; i++) + { + // split vertex + int h_tmp = this.SplitVertex(hs[i], h_new); + h_new = h_tmp; // tidy-up if 'vs' is removed + } + + // split face to create new truncated face + int splitH = this._mesh.Faces.SplitFace(hs[0], h_new); + + return this._mesh.Halfedges[this._mesh.Halfedges.GetPairHalfedge(splitH)].AdjacentFace; + } + #endregion + + #region by dyliu + public void AssignVertexIndex() + { + for (int i = 0; i < this.Count(); i++) + { + this[i].Index = i; + + } + } + + + #endregion + + + + #region IEnumerable implementation + /// + /// Gets an enumerator that yields all faces in this collection. + /// + /// An enumerator. 
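A usage sketch for EraseCenterVertex above: a 2x2 grid of quads has exactly one interior vertex, and erasing it merges the four quads into a single face. Faces.AddFace taking vertex indices is an assumption based on the face list's public API; nothing here is part of the commit.

using Plankton;

static class EraseCenterSketch
{
    internal static PlanktonMesh Run()
    {
        var mesh = new PlanktonMesh();
        for (int j = 0; j < 3; j++)
            for (int i = 0; i < 3; i++)
                mesh.Vertices.Add(i, j, 0.0);          // vertex index = j * 3 + i
        mesh.Faces.AddFace(0, 1, 4, 3);                // four anticlockwise quads around vertex 4
        mesh.Faces.AddFace(1, 2, 5, 4);
        mesh.Faces.AddFace(3, 4, 7, 6);
        mesh.Faces.AddFace(4, 5, 8, 7);

        int h = mesh.Vertices.GetHalfedges(4)[0];      // any halfedge leaving the interior vertex
        mesh.Vertices.EraseCenterVertex(h);            // merges the four quads into one face
        mesh.Vertices.CullUnused();                    // drop the now-unused centre vertex
        return mesh;                                   // merged-away faces stay flagged as unset until the face list is compacted
    }
}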
+ public IEnumerator GetEnumerator() + { + return this._list.GetEnumerator(); + } + IEnumerator IEnumerable.GetEnumerator() + { + return this.GetEnumerator(); + } + #endregion + } +} diff --git a/src/Plankton/packages.config b/src/Plankton/packages.config new file mode 100644 index 0000000..8caf6b7 --- /dev/null +++ b/src/Plankton/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/src/PlanktonFold/GhcMeshAnalysis.cs b/src/PlanktonFold/GhcMeshAnalysis.cs new file mode 100644 index 0000000..176b1f3 --- /dev/null +++ b/src/PlanktonFold/GhcMeshAnalysis.cs @@ -0,0 +1,227 @@ +using System; +using System.Linq; +using System.Collections.Generic; +using Plankton; +using Grasshopper; +using PlanktonGh; +using Grasshopper.Kernel; +using Grasshopper.Kernel.Data; +using Rhino.Geometry; +using MathNet.Numerics.LinearAlgebra; + +namespace PlanktonFold +{ + public class GhcMeshAnalysis : GH_Component + { + + public GhcMeshAnalysis() + : base("MeshAnalysis", "MeshAnalysis", + "Analyse the input geometry, can be plankton mesh, rhino mesh, or a list of surfaces", + "MT", "Analysis") + { + } + + protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) + { + // 0 + pManager.AddSurfaceParameter("Surfaces", "Surfaces", "Surfaces as list", GH_ParamAccess.list); + pManager[0].Optional = true; + + // 1 + pManager.AddMeshParameter("Mesh", "Mesh", "Mesh", GH_ParamAccess.item); + pManager[1].Optional = true; + + // 2 + pManager.AddGenericParameter("PlanktonMesh", "PlanktonMesh", "PlanktonMesh", GH_ParamAccess.item); + pManager[2].Optional = true; + + } + + protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) + { + // 0 + pManager.AddMeshParameter("Mesh", "Mesh", "Mesh", GH_ParamAccess.item); + + // 1 + pManager.AddPointParameter("cVertices", "cVertices", "cVertices", GH_ParamAccess.list); // inner vertices with constraints + + // 2 + pManager.AddGenericParameter("NeighborEdges", "NeighborEdges", "NeighborEdges", GH_ParamAccess.tree); + + // 3 + pManager.AddNumberParameter("Sector Angles", "Sector Angles", "theta", GH_ParamAccess.tree); + + // 4 + pManager.AddNumberParameter("Fold Angles", "Fold Angles", "rho", GH_ParamAccess.tree); + + // 5 + pManager.AddGenericParameter("F Matrix", "F Matrix", "F Matrix", GH_ParamAccess.list); + + // 6 + pManager.AddPlaneParameter("Pln", "Pln", "Pln", GH_ParamAccess.tree); + + // 7 + pManager.AddGenericParameter("PMesh", "PMesh", "PMesh", GH_ParamAccess.item); + + // 8 + pManager.AddLineParameter("M", "M", "M", GH_ParamAccess.list); + + // 9 + pManager.AddLineParameter("V", "V", "V", GH_ParamAccess.list); + + } + + Mesh M = new Mesh(); + PlanktonMesh P = new PlanktonMesh(); + + protected override void SolveInstance(IGH_DataAccess DA) + { + // define Mesh(M) & PlanktonMesh(P) + List surfaces = new List(); + Mesh mesh = new Mesh(); + if (DA.GetDataList("Surfaces", surfaces)) { M = RhinoSupport.SrfToRhinoMesh(surfaces); }; + if (DA.GetData("Mesh", ref mesh)) { M = mesh; }; + //if (DA.GetData("PlanktonMesh", ref )) { M = mesh; }; + P = RhinoSupport.ToPlanktonMesh(M); + + // assaign index to faces, half edges and vertices of the planktonmesh, so that it's easier to query from lists + P.Faces.AssignFaceIndex(); + P.Halfedges.AssignHalfEdgeIndex(); + P.Vertices.AssignVertexIndex(); + + // determine MV + // select all inner edges of the mesh (boundary edges don't have MV properties) + List innerEdges = P.Halfedges.ToList().Where(o => o.AdjacentFace != -1 && + 
P.Halfedges[P.Halfedges.GetPairHalfedge(o.Index)].AdjacentFace != -1 + ).ToList(); + foreach (PlanktonHalfedge e in innerEdges) + e.MV = RhinoSupport.MVDetermination(P, e.Index); + + // get the inner vertices as index and point + List cVertices = RhinoSupport.GetConstraintVertices(P); + List cVertexIndices = RhinoSupport.GetConstraintVertexIndices(P); + + // get the neighbour edges of all inner vertices in a datatree + DataTree neighbourEdges = new DataTree(); + for (int j = 0; j < cVertexIndices.Count(); j++) + { + GH_Path jPth = new GH_Path(j); + neighbourEdges.AddRange(RhinoSupport.NeighbourVertexEdges(P, cVertexIndices[j]) + .Select(o => RhinoSupport.HalfEdgeToLine(P, o)).ToList(), jPth); + } + + // get the sector angles of all inner vertices in a datatree + DataTree sectorAngles = new DataTree(); + for (int j = 0; j < cVertexIndices.Count(); j++) + { + GH_Path jPth = new GH_Path(j); + sectorAngles.AddRange(RhinoSupport.GetSectorAngles(P, cVertexIndices[j], + RhinoSupport.NeighbourVertexEdges(P, cVertexIndices[j])) + .ToList(), jPth); + } + + // get the fold angles of all inner vertices in a datatree + DataTree foldAngles = new DataTree(); + for (int j = 0; j < cVertexIndices.Count(); j++) // for the j th inner vertice + { + GH_Path jPth = new GH_Path(j); + foldAngles.AddRange(RhinoSupport.GetFoldAngles(P, cVertexIndices[j], + RhinoSupport.NeighbourVertexEdges(P, cVertexIndices[j])) + .ToList(), jPth); + } + + + + // compute F matrices for all inner vertices + // in the order of inner vertex, each one has a F matrix. A F matrix is a indentity matrix when this constraint is satisfied + List> FMatrix = new List>(); + for (int j = 0; j < cVertexIndices.Count(); j++) + { + List edges = RhinoSupport.NeighbourVertexEdges(P, cVertexIndices[j]); + List rhos = RhinoSupport.GetFoldAngles(P, cVertexIndices[j], edges); // checked + List thetas = RhinoSupport.GetSectorAngles(P, cVertexIndices[j], edges); // checked + FMatrix.Add(Solver.F(rhos, thetas)); + } + + // the coordinate system of all constraint vertices + DataTree pln = new DataTree(); + for (int i = 0; i < cVertexIndices.Count; i++) + { + int neighbourEdgeCount = + RhinoSupport.NeighbourVertexEdges(P, cVertexIndices[i]).Count; + GH_Path iPth = new GH_Path(i); + // xx pointing outwards along one foldline + List xx = neighbourEdges.Branch(iPth).Select(o => o.UnitTangent).ToList(); + // ff are the index of adjacent faces of a cVertice + List ff = RhinoSupport.NeighbourVertexEdges(P, P.Vertices[cVertexIndices[i]].Index).Select(o => o.AdjacentFace).ToList(); + // zz are the face normals + List zz = ff.Select(o => RhinoSupport.GetFaceNormal(P, o).Last().UnitTangent).ToList(); + + List iPlanes = new List(); + for (int j = 0; j < neighbourEdgeCount; j++) + { + Plane jPln = new Plane(cVertices[i], xx[j], Vector3d.CrossProduct(zz[j], xx[j])); + iPlanes.Add(jPln); + } + pln.AddRange(iPlanes, iPth); + + } + + /* + DataTree foldAngles = new DataTree(); + for (int j = 0; j < cVertexIndices.Count(); j++) // for the j th inner vertice + { + GH_Path jPth = new GH_Path(j); + List jVertexPlns = pln.Branches[j]; + + List ifoldAngles = new List(); + + for(int i = 0; i < jVertexPlns.Count; i++) + { + if(i != jVertexPlns.Count - 1) // not the last plane + ifoldAngles.Add(Math.PI - Vector3d.VectorAngle(jVertexPlns[i].Normal, jVertexPlns[i+1].Normal)); + else + ifoldAngles.Add(Math.PI - Vector3d.VectorAngle(jVertexPlns[i].Normal, jVertexPlns[0].Normal)); + + } + foldAngles.AddRange(ifoldAngles, jPth); + } + */ + + DA.SetData("Mesh", RhinoSupport.ToRhinoMesh(P)); 
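The F matrix computed above encodes the closure condition at one interior vertex: the product over its fold lines of a fold rotation C(rho_i) and a sector rotation B(theta_i) must return to the identity, which is what Solver.F builds. A standalone numeric check of that condition, assuming only MathNet.Numerics (illustrative only, not part of the component):

using System;
using System.Collections.Generic;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

static class LoopClosureSketch
{
    // Rotation about the fold line (local x axis) by fold angle rho.
    static Matrix<double> C(double rho)
    {
        return DenseMatrix.OfArray(new double[,]
        {
            { 1.0, 0.0,            0.0 },
            { 0.0, Math.Cos(rho), -Math.Sin(rho) },
            { 0.0, Math.Sin(rho),  Math.Cos(rho) }
        });
    }

    // In-plane rotation by sector angle theta.
    static Matrix<double> B(double theta)
    {
        return DenseMatrix.OfArray(new double[,]
        {
            { Math.Cos(theta), -Math.Sin(theta), 0.0 },
            { Math.Sin(theta),  Math.Cos(theta), 0.0 },
            { 0.0,              0.0,             1.0 }
        });
    }

    // Accumulates C(rho_i) * B(theta_i) around the vertex and returns the
    // Frobenius-norm deviation of the product from the identity.
    internal static double Deviation(IList<double> rhos, IList<double> thetas)
    {
        var F = Matrix<double>.Build.DenseIdentity(3);
        for (int i = 0; i < rhos.Count; i++)
            F = F.Multiply(C(rhos[i]).Multiply(B(thetas[i])));
        return (F - Matrix<double>.Build.DenseIdentity(3)).FrobeniusNorm();
    }
}

A deviation near zero means the fold angles and sector angles at that vertex are geometrically compatible; a larger value indicates the vertex constraint is violated.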
+ DA.SetDataList("cVertices", cVertices); + DA.SetDataTree(2, neighbourEdges); + DA.SetDataTree(3, sectorAngles); + DA.SetDataTree(4, foldAngles); + DA.SetDataList("F Matrix", FMatrix); + DA.SetDataTree(6, pln); + DA.SetData(7, P); + + // M + List mFoldLine = P.Halfedges.Where(o => o.MV == 1).ToList().Select(p => RhinoSupport.HalfEdgeToLine(P, p)).ToList(); + DA.SetDataList("M", mFoldLine); + + // V + List vFoldLine = P.Halfedges.Where(o => o.MV == -1).ToList().Select(p => RhinoSupport.HalfEdgeToLine(P, p)).ToList(); + DA.SetDataList("V", vFoldLine); + + #region unused test + + #endregion + } + + protected override System.Drawing.Bitmap Icon + { + get + { + + return Properties.Resources.mesh_analysis_Icon_06__06; + } + } + + public override Guid ComponentGuid + { + get { return new Guid("{ae648a75-b82f-4d4e-b7ca-1f06abe896e4}"); } + } + } +} diff --git a/src/PlanktonFold/GhcSofiStaightLine.cs b/src/PlanktonFold/GhcSofiStaightLine.cs new file mode 100644 index 0000000..761ab23 --- /dev/null +++ b/src/PlanktonFold/GhcSofiStaightLine.cs @@ -0,0 +1,317 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Grasshopper.Kernel; +using Rhino.Geometry; +using PlanktonGh; +using Plankton; +using PlanktonFold; +using Grasshopper; +using Grasshopper.Kernel.Data; + +namespace PlanktonFold +{ + public class GhcSofiStaightLine : GH_Component + { + + public GhcSofiStaightLine() + : base("GhcSofiStaightLine", "Sofi_StreightLineFold", + "generate the geometry for straight line folding sofistik simulation, including KNOT, QUAD, Stab(beam), SEIL(cable), constraint", + "MT", "Sofistik") + { + } + + protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) + { + // 0 + pManager.AddGenericParameter("Mesh", "Mesh", "crease pattern", GH_ParamAccess.item); + + // 1 + pManager.AddNumberParameter("Hinge Width", "Hinge Width", "Hinge Width", GH_ParamAccess.item); + + // 2 + // if the input mesh is non flat, MV assignment is fixed, so this input is redundant. 
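The hinge-width input above is used further down in SolveInstance to shift each fold line sideways within the crease-pattern plane before it is subdivided into hinge points. A simplified RhinoCommon sketch of that offset (illustrative only, names are placeholders):

using Rhino.Geometry;

static class HingeOffsetSketch
{
    // Offsets a fold line by half the hinge width to both sides, within the plane
    // defined by the mesh normal; a simplified version of the construction in SolveInstance.
    internal static Line[] OffsetFoldLine(Line foldLine, Vector3d planeNormal, double hingeWidth)
    {
        Vector3d side = Vector3d.CrossProduct(foldLine.UnitTangent, planeNormal);
        Line left = foldLine;     // Line is a struct, so these are independent copies
        Line right = foldLine;
        left.Transform(Transform.Translation(side * (hingeWidth / 2.0)));
        right.Transform(Transform.Translation(side * (-hingeWidth / 2.0)));
        return new[] { left, foldLine, right };
    }
}

The three parallel rails (offset, original, offset) are what the component later divides into the point triples that the cables and short beams connect.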
+ pManager.AddGenericParameter("MV Assignment", "MV Assignment", "MV Assignment", GH_ParamAccess.list); + pManager[2].Optional = true; + + // 3 + pManager.AddNumberParameter("Actuation Assignment", "Actuation Assignment", "Actuation Assignment", GH_ParamAccess.list); // 1 as actuation, 0 as no actuation + pManager[3].Optional = true; + + // 4 + pManager.AddNumberParameter("Subdivide Count", "Subdivide Count", "Subdivide Count", GH_ParamAccess.item); + + // 5 + + } + + protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) + { + // 0 + pManager.AddPointParameter("Cable Start Nr", "Cable Start Nr", "Cable Start Nr", GH_ParamAccess.list); + + // 1 + pManager.AddPointParameter("Cable End Nr", "Cable End Nr", "Cable End Nr", GH_ParamAccess.list); + + // 2 + pManager.AddPointParameter("Beam Start Nr", "Beam Start Nr", "Beam Start Nr", GH_ParamAccess.list); + + // 3 + pManager.AddPointParameter("Beam End Nr", "Beam End Nr", "Beam End Nr", GH_ParamAccess.list); + + // 4 + pManager.AddGenericParameter("FoldLines", "FoldLines", "FoldLines", GH_ParamAccess.tree); + + // 5 + pManager.AddGenericParameter("Points Tree", "Points Tree", "Points Tree", GH_ParamAccess.tree); + + // 6 + pManager.AddMeshParameter("Subdivided Mesh", "Subdivided Mesh", "Subdivided Mesh", GH_ParamAccess.item); + + // 7 + pManager.AddPointParameter("Moved Points", "Moved Points", "Moved Points", GH_ParamAccess.tree); + + // 8 + pManager.AddLineParameter("Cable Tree", "Cable Tree", "Cable Tree", GH_ParamAccess.tree); + + // 9 + pManager.AddLineParameter("Short Beam Vertical", "Short Beam Vertical", "Short Beam Vertical", GH_ParamAccess.tree); + + // 10 + pManager.AddLineParameter("Short Beam Level", "Short Beam Level", "Short Beam Level", GH_ParamAccess.tree); + + // 11 + pManager.AddPointParameter("Fix Points on Mesh", "Fix Points on Mesh", "Fix Points on Mesh", GH_ParamAccess.tree); + + // 12 + + + } + + protected override void SolveInstance(IGH_DataAccess DA) + { + Mesh M = new Mesh(); + DA.GetData("Mesh", ref M); + PlanktonMesh P = RhinoSupport.ToPlanktonMesh(M); + + double hingeWidth = 0.1; + DA.GetData("Hinge Width", ref hingeWidth); + + List mvAssignment = new List(); + DA.GetDataList("MV Assignment", mvAssignment); + + List actuationAssignment = new List(); + DA.GetDataList("Actuation Assignment", actuationAssignment); + + // ======================fix points on the mesh plane====================== + + Vector3d planeNormal = RhinoSupport.GetFaceNormal(P, 0).First().UnitTangent; // a unit vector of the mesh surface vector + List foldLines = RhinoSupport.GetInnerEdges(P); + List moveVectors = foldLines.Select(o => Vector3d.CrossProduct(o.UnitTangent, planeNormal)).ToList(); + int foldLineCount = foldLines.Count / 2; + int foldlineDivide = 5; // how many points on one fold line + + //each foldline becomes 3 lines + List> linesToSubdivide = new List>(); + List allFoldLinePts = new List(); + DataTree linesTree = new DataTree(); + DataTree pointsTree = new DataTree(); + + // loop for each foldline + for (int i = 0; i < foldLineCount ; i++) + { + // add first line + List threeLines = new List(); + + // construct translation movements + Line l1 = foldLines[2 * i]; + Line l2 = foldLines[2 * i + 1]; + + l1.Transform(Transform.Translation(moveVectors[2 * i] * (hingeWidth/2.0))); + l2.Transform(Transform.Translation(moveVectors[2 * i + 1] * (hingeWidth / 2.0))); + + // add three lines to the list + threeLines.Add( new Line(l1.PointAt(0.1), l1.PointAt(0.9) )); + threeLines.Add(new Line(foldLines[2 * 
i].PointAt(0.1), foldLines[2 * i].PointAt(0.9))); // duplicate the line + threeLines.Add(new Line(l2.PointAt(0.9), l2.PointAt(0.1))); + + linesToSubdivide.Add(threeLines); + + // put three lines in a node in the data tree + GH_Path iPath = new GH_Path(i); + linesTree.AddRange(threeLines, iPath); + foldLinePoints iFoldLinePts = new foldLinePoints(); + iFoldLinePts.foldLinePts = new List>(); + + for (int j = 0; j <= foldlineDivide; j++) + { + // where the points are located on the line? + double p = j / (double)foldlineDivide; + List threePts = new List(); + + // append 3 points on position p/foldlineDivide + //threeLines = threeLines.Select(o => o.ToNurbsCurve()).ToList(); + + threePts.Add(threeLines[0].PointAt(p)); + threePts.Add(threeLines[1].PointAt(p)); + threePts.Add(threeLines[2].PointAt(p)); + + GH_Path ijPath = new GH_Path(i, j); + + pointsTree.AddRange(threePts, ijPath); + iFoldLinePts.foldLinePts.Add(threePts); + allFoldLinePts.Add(iFoldLinePts); + } + } + + // flatten into a list + DA.SetDataTree(4, linesTree); + DA.SetDataTree(5, pointsTree); + + // Mesh Subdivision + // trim points tree in a list for the mesh adjustment + + List fixPoints = pointsTree.AllData(); + + double maxSubdivision = 0.0; + DA.GetData("Subdivide Count", ref maxSubdivision); + + // divide + int count = 0; + do + { + P = RhinoSupport.QuadSubdivide(P); + count += 1; + } while (count < maxSubdivision); + + // move + RhinoSupport.MoveVertices(P, fixPoints); + + Mesh subdividedMesh = RhinoSupport.ToRhinoMesh(P); + DA.SetData("Subdivided Mesh", subdividedMesh); + + + // =============================move points up or down according to MV assignment============================= + //List hingedPOintsMoved = new List(); + DataTree movedPointsTree = new DataTree(); + + DataTree cablesTree = new DataTree(); + DataTree cableStartNr = new DataTree(); + DataTree cableEndNr = new DataTree(); + + DataTree shortbeamTreeNormal = new DataTree(); + DataTree shortbeamTreeLevel = new DataTree(); + + int fineMehsVertixCount = subdividedMesh.Vertices.Count; // when outputing the cable + + for (int i = 0; i < mvAssignment.Count; i++) + { + for (int j = 0; j <= foldlineDivide; j++) + { + GH_Path ijPath = new GH_Path(i, j); + List pts = pointsTree.Branch(ijPath); + + // if -1, valley, move upwards + if (mvAssignment[i] == "-1") + { + List movedPts = new List(); + + foreach (Point3d pt in pts) + { + Point3d movedPt = pt; + movedPt.Transform(Transform.Translation(planeNormal*0.2)); + movedPts.Add(movedPt); + } + movedPointsTree.AddRange(movedPts, ijPath); + + Line cable1 = new Line(movedPts[0], movedPts[1]); + Line cable2 = new Line(movedPts[1], movedPts[2]); + cablesTree.AddRange(new List { cable1, cable2 }, ijPath); + } + + // if +1, mountain, move downwards + else if (mvAssignment[i] == "1") + { + List movedPts = new List(); + + foreach (Point3d pt in pts) + { + Point3d movedPt = pt; + movedPt.Transform(Transform.Translation(planeNormal * 0.2 * (-1))); + movedPts.Add(movedPt); + } + movedPointsTree.AddRange(movedPts, ijPath); + Line cable1 = new Line(movedPts[0], movedPts[1]); + Line cable2 = new Line(movedPts[1], movedPts[2]); + + cablesTree.AddRange(new List { cable1, cable2 }, ijPath); + } + } + } + + // loop to create vertical beam connection + for (int i = 0; i < foldLineCount; i++) + { + for (int j = 0; j <= foldlineDivide; j++) + { + GH_Path ijPath = new GH_Path(i, j); + List ptsOnMesh = pointsTree.Branch(ijPath); + List ptsMoved = movedPointsTree.Branch(ijPath); + List shortbeamN = new List(); + for (int k = 0; k < 
ptsOnMesh.Count; k++) + shortbeamN.Add(new Line(ptsOnMesh[k], ptsMoved[k])); + shortbeamTreeNormal.AddRange(shortbeamN, ijPath); + + } + } + + // loop to create level beam connection + for (int i = 0; i < foldLineCount; i++) + { + for (int j = 0; j < foldlineDivide; j++) + { + GH_Path ijPath = new GH_Path(i, j); + GH_Path ijjPath = new GH_Path(i, j+1); + + List ptsMovedij = movedPointsTree.Branch(ijPath); + List ptsMovedijj = movedPointsTree.Branch(ijjPath); + + for (int k = 0; k < ptsMovedij.Count; k++) + { + List shortbeamL = new List(); + shortbeamL.Add(new Line(ptsMovedij[k], ptsMovedijj[k])); + shortbeamTreeLevel.AddRange(shortbeamL, ijPath); + } + + + } + } + + DA.SetDataTree(7, movedPointsTree); + DA.SetDataTree(8, cablesTree); + DA.SetDataTree(9, shortbeamTreeNormal); + DA.SetDataTree(10, shortbeamTreeLevel); + DA.SetDataTree(11, pointsTree); + } + + protected override System.Drawing.Bitmap Icon + { + get + { + + return null; + } + } + + public override Guid ComponentGuid + { + get { return new Guid("{4170ec9e-4508-43c7-802c-0b00b4d3969d}"); } + } + + public struct foldLinePoints + { + public List> foldLinePts; + + } + } +} \ No newline at end of file diff --git a/src/PlanktonFold/GhcStructureFold.cs b/src/PlanktonFold/GhcStructureFold.cs new file mode 100644 index 0000000..acf3bd3 --- /dev/null +++ b/src/PlanktonFold/GhcStructureFold.cs @@ -0,0 +1,226 @@ +using System; +using System.Collections.Generic; + +using Grasshopper.Kernel; +using Rhino.Geometry; + +using PlanktonFold; +using Plankton; +using PlanktonGh; +// matrix +using MathNet.Numerics.LinearAlgebra; + +namespace PlanktonFold +{ + public class GhcStructureFold : GH_Component + { + + public GhcStructureFold() + : base("GhcStructureFold", "GhcStructureFold", + "mode analysise", + "MT", "Analysis") + { + } + + + protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) + { + // 0 + pManager.AddNumberParameter("kFold", "kFold", "kFold", GH_ParamAccess.item); + pManager[0].Optional = true; + + // 1 + pManager.AddNumberParameter("kFace", "kFace", "kFace", GH_ParamAccess.item); + pManager[1].Optional = true; + + // 2 + pManager.AddMeshParameter("triangulatedMesh", "triangulatedMesh", "triangulatedMesh", GH_ParamAccess.item); + pManager[1].Optional = true; + + // 3 + pManager.AddMeshParameter("originalMesh", "originalMesh", "originalMesh", GH_ParamAccess.item); + + + + + } + + + protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) + { + // 0 + pManager.AddMeshParameter("MorphingMesh", "MorphingMesh", "MorphingMesh", GH_ParamAccess.list); + + // 1 + pManager.AddGenericParameter("T Matrix", "T Matrix", "T Matrix", GH_ParamAccess.item); + + } + + Mesh triM = new Mesh(); + Mesh quadM = new Mesh(); + + + + protected override void SolveInstance(IGH_DataAccess DA) + { + // triangulated mesh + Mesh tridMesh = new Mesh(); + if (DA.GetData("triangulatedMesh", ref tridMesh)) { triM = tridMesh; }; + + // original quad mesh + Mesh quadMesh = new Mesh(); + if (DA.GetData("originalMesh", ref quadMesh)) { quadM = quadMesh; }; + + int bFold = quadM.TopologyEdges.Count; // fold bar + int bAll = triM.TopologyEdges.Count; + int bFace = bAll - bFold; // face bar + + // B = b + bb + int n = triM.Vertices.Count; + + #region math + + /* + Reference: Schenk and Guest 2010, Origami Folding: A Structural Engineering Approach + + equations: + EQUIL: + A t = f + [3n*b_all] [b_all*1] [3n*1] + COM: + C d = e + [b_all*3n] [3n*1] [b_all*1] + MAT: + G_fold e_fold = t_fold + [bFold*bFold] 
[bFold*1] [bFold*1] + G_face e_face = dtheta + [G_face*G_face] [G_face*1] [G_face*1] + + what is J + J = dF = dFdx*dx + dFdy*dy + dFdz*dz = dFdtheta* dtheta + + what is F + F is additional constraints, F = sin(theta) = sin( theta(x,y,z) ), thus dFdx, dFdy, dFdz is solvable. + + + K = C^T G C + + [3n*bAll] [bAll*bAll] [bAll*3n] + Ja^T G_Ja Ja + + [3n*bFace] [bFace*bFace] [bFace*3n] + Jo^T G_Jo Jo + [3n*bFold] [bFold*bFold] [bFold*3n] + + + */ + #endregion + + var doubleMatrix = Matrix.Build; + + Matrix K = doubleMatrix.DenseIdentity(3*n); // global stiffness matrix + // 3n * 3n + + #region axial + Matrix C = doubleMatrix.DenseIdentity(bAll, 3*n); + // bFold * 3n + + Matrix G = doubleMatrix.DenseIdentity(bAll, bAll); + // bFold * bFold + + // for a 3d bar with 6 DOF + // global coordinate: xyz, local coordinate: x_hat, y_hat, z_hat + + // local K + Matrix K_hat; + double[,] _K_hat = + { + {1,0,0,-1,0,0 }, + {0,0,0,0,0,0, }, + {-1,0,0,1,0,0 } + }; + K_hat = doubleMatrix.SparseOfArray(_K_hat); + + + // global coordinate + Vector3d worldX = new Vector3d(1, 0, 0); + Vector3d worldY = new Vector3d(0, 1, 0); + Vector3d worldZ = new Vector3d(0, 0, 1); + Plane worldCoor = new Plane(Point3d.Origin, worldX, worldY); + + // + List> globalAxialKes = new List>(); + for ( int i = 0; i < triM.TopologyEdges.Count; i++) + { + Line iBar = triM.TopologyEdges.EdgeLine(i); + Matrix iT = doubleMatrix.DenseOfArray(RhinoSupport.getTranforamtionArray(iBar, worldCoor)); + globalAxialKes.Add(iT); + + } + + // 3n * 3n + Matrix globalAxialK = doubleMatrix.Dense(triM.Vertices.Count * 3, triM.Vertices.Count * 3); + + // loop bars + for (int i = 0; i < triM.TopologyEdges.Count; i++) + { + int startNode = triM.TopologyEdges.GetTopologyVertices(i).I; + int endNode = triM.TopologyEdges.GetTopologyVertices(i).J; + + // element K of ith bar + Matrix iGlobalAxialKe = globalAxialKes[i]; // 6*6 + + // extract element K of ith bar from global K + Matrix II_subM = globalAxialK.SubMatrix(startNode * 3, 3, startNode * 3, 3); // 3*3 + Matrix II_subM_ = II_subM.Add(iGlobalAxialKe.SubMatrix(0, 3, 0, 3)); + globalAxialK.SetSubMatrix(startNode * 3, startNode * 3, II_subM_); + + Matrix IJ_subM = globalAxialK.SubMatrix(startNode * 3, 3, endNode * 3, 3); // 3*3 + Matrix IJ_subM_ = IJ_subM.Add(iGlobalAxialKe.SubMatrix(0, 3, 3, 3)); + globalAxialK.SetSubMatrix(startNode * 3, endNode * 3, IJ_subM_); + + Matrix JI_subM = globalAxialK.SubMatrix(endNode * 3, 3, startNode * 3, 3); // 3*3 + Matrix JI_subM_ = JI_subM.Add(iGlobalAxialKe.SubMatrix(3, 3, 0, 3)); + globalAxialK.SetSubMatrix(endNode * 3, startNode * 3, JI_subM_); + + Matrix JJ_subM = globalAxialK.SubMatrix(endNode * 3, 3, endNode * 3, 3); // 3*3 + Matrix JJ_subM_ = JJ_subM.Add(iGlobalAxialKe.SubMatrix(3, 3, 3, 3)); + globalAxialK.SetSubMatrix(endNode * 3, endNode * 3, JJ_subM_); + + } + #endregion + + #region bending face + Matrix Ja = doubleMatrix.DenseIdentity(bFace, 3*n); + // bFace * 3n + + Matrix G_Ja = doubleMatrix.DenseIdentity(bFace, bFace); + // bFace * bFace + #endregion + + #region bending fold + Matrix Jo = doubleMatrix.DenseIdentity(bFold, 3 * n); + // bFace * 3n + + Matrix G_Jo = doubleMatrix.DenseIdentity(bFold, bFold); + // bFace * bFace + #endregion + + DA.SetData("T Matrix", globalAxialK); + } + + protected override System.Drawing.Bitmap Icon + { + get + { + //You can add image files to your project resources and access them like this: + // return Resources.IconForThisComponent; + return null; + } + } + + + public override Guid ComponentGuid + { + get { return new 
Guid("7b56dc34-6bf0-4f0e-ac4f-42e11d62ebe9"); } + } + } +} \ No newline at end of file diff --git a/src/PlanktonFold/GhcSubdivisionQuad.cs b/src/PlanktonFold/GhcSubdivisionQuad.cs new file mode 100644 index 0000000..028ecbc --- /dev/null +++ b/src/PlanktonFold/GhcSubdivisionQuad.cs @@ -0,0 +1,86 @@ +using System; +using System.Collections.Generic; +using Grasshopper.Kernel; +using Rhino.Geometry; +using System.Linq; +using Plankton; +using PlanktonGh; + +/// +/// current state: +/// subdivision based on how many subdivision loops to run +/// +namespace PlanktonFold +{ + public class GhcSubdivisionQuad : GH_Component + { + + public GhcSubdivisionQuad() + : base("GhcSubdivisionQuad", "Subdivision", + "quad-subdivide a suface based on input point positions, so that the subdivided mesh has vertex on these points", + "MT", "Sofistik") + { + } + + protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) + { + pManager.AddGenericParameter("Geometry", "Geometry", "Geometry", GH_ParamAccess.item); // for now a plankton mesh + pManager.AddPointParameter("Fix Points", "Fix Points", "Fix Points", GH_ParamAccess.list); + pManager.AddNumberParameter("Subdivide Count", "Subdivide Count", "Subdivide Count", GH_ParamAccess.item); + + //pManager.AddNumberParameter("Tolerance", "Tolerance", "Tolerance", GH_ParamAccess.item); + //pManager.AddNumberParameter("Max Length", "Max Length", "Max Length", GH_ParamAccess.item); + } + + protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) + { + pManager.AddGenericParameter("Mesh", "Mesh", "Mesh", GH_ParamAccess.item); + } + + protected override void SolveInstance(IGH_DataAccess DA) + { + PlanktonMesh P = new PlanktonMesh(); + DA.GetData("Geometry", ref P); + List fixPoints = new List(); + DA.GetDataList("Fix Points", fixPoints); + double maxSubdivision = 0.0; + DA.GetData("Subdivide Count", ref maxSubdivision); + + // subdivide the mesh accorading to count + int count = 0; + do + { + P = RhinoSupport.QuadSubdivide(P); + count += 1; + + } while (count < maxSubdivision); + + // move + RhinoSupport.MoveVertices(P, fixPoints); + + Mesh M = RhinoSupport.ToRhinoMesh(P); + List meshFaces = M.Faces.ToList(); + + DA.SetData("Mesh", M); + + } + + protected override System.Drawing.Bitmap Icon + { + get + { + //You can add image files to your project resources and access them like this: + // return Resources.IconForThisComponent; + return null; + } + } + + /// + /// Gets the unique ID for this component. Do not change this ID after release. 
+ /// + public override Guid ComponentGuid + { + get { return new Guid("{7b41bdaf-162f-4549-ae17-799c1b1710ee}"); } + } + } +} \ No newline at end of file diff --git a/src/PlanktonFold/GhcToRhinoMesh.cs b/src/PlanktonFold/GhcToRhinoMesh.cs new file mode 100644 index 0000000..ee8e91c --- /dev/null +++ b/src/PlanktonFold/GhcToRhinoMesh.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; + +using Grasshopper.Kernel; +using Rhino.Geometry; +using Plankton; +using PlanktonGh; +using PlanktonFold; + + +namespace PlanktonGh +{ + public class GhcToRhinoMesh : GH_Component + { + + public GhcToRhinoMesh() + : base("GhcToRhinoMesh", "GhcToRhinoMesh", + "convert a PlanktonMesh to RhinoMesh", + "MT", "Utility") + { + } + + + protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) + { + pManager.AddGenericParameter("PlanktonMesh", "PlanktonMesh", "PlanktonMesh", GH_ParamAccess.item); + + } + + protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) + { + pManager.AddMeshParameter("Rhino Mesh", "Rhino Mesh", "Rhino Mesh", GH_ParamAccess.item); + + } + + + protected override void SolveInstance(IGH_DataAccess DA) + { + PlanktonMesh P = new PlanktonMesh(); + DA.GetData("PlanktonMesh", ref P); + Mesh M = new Mesh(); + M = RhinoSupport.ToRhinoMesh(P); + DA.SetData("Rhino Mesh", M); + + } + + + protected override System.Drawing.Bitmap Icon + { + get + { + + return PlanktonFold.Properties.Resources.pmesh_to_mesh_07; + } + } + + /// + /// Gets the unique ID for this component. Do not change this ID after release. + /// + public override Guid ComponentGuid + { + get { return new Guid("{d8c26779-5a24-4047-bb74-e1d901c6a029}"); } + } + } +} \ No newline at end of file diff --git a/src/PlanktonFold/Math/Solver.cs b/src/PlanktonFold/Math/Solver.cs new file mode 100644 index 0000000..13d9fe4 --- /dev/null +++ b/src/PlanktonFold/Math/Solver.cs @@ -0,0 +1,100 @@ +using System; +using System.Numerics; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using MathNet.Numerics; +using MathNet.Numerics.LinearAlgebra; +using MathNet.Numerics.LinearAlgebra.Double; +using Rhino.Geometry; + + + +namespace PlanktonFold +{ + public static class Solver + { + /// + /// fold line rotation matrix, input angle in radian + /// + /// + /// + public static Matrix C(double rho) + { + Matrix cMatrix = DenseMatrix.OfArray(new double[,] + { + {1, 0, 0}, + {0, Trig.Cos(rho), - Trig.Sin(rho)}, + {0, Trig.Sin(rho), Trig.Cos(rho)} + }); + return cMatrix; + } + + /// + /// sector angle matrix, input angle in radian + /// + /// + /// + public static Matrix B(double theta) // geometry angle + { + Matrix bMatrix = DenseMatrix.OfArray(new double[,] + { + {Trig.Cos(theta), - Trig.Sin(theta), 0}, + {Trig.Sin(theta), Trig.Cos(theta), 0}, + {0, 0, 1} + + }); + return bMatrix; + } + + /// + /// rotation matrix for a frame + /// + /// + /// + /// + public static Matrix Chi(Matrix c, Matrix b) + { + Matrix chi = c.Multiply(b); + return chi; + } + + /// + /// + /// + /// + /// + /// + public static Matrix F(List rhos, List thetas) + { + var M = Matrix.Build; + Matrix F = M.DenseIdentity(3); + + for (int i = 0; i < rhos.Count(); i++) + { + Matrix F_ = F; + F = F_.Multiply( Chi( C(rhos[i]), B(thetas[i]) )); + } + + return F; + } + + public static List GetVectors(Plane p, List rhos, List thetas) + { + List vectors = new List(); + + var M = Matrix.Build; + Matrix F = M.DenseIdentity(3); + + for (int i = 0; i < rhos.Count(); 
i++) + { + p.Transform(Transform.Rotation(rhos[i], p.XAxis, p.Origin)); + + F = F.Multiply(Chi(C(rhos[i]), B(thetas[i]))); + } + + return vectors; + } + } +} diff --git a/src/PlanktonFold/PlanktonFold.csproj b/src/PlanktonFold/PlanktonFold.csproj new file mode 100644 index 0000000..789ecaf --- /dev/null +++ b/src/PlanktonFold/PlanktonFold.csproj @@ -0,0 +1,151 @@ + + + + Debug32 + AnyCPU + 8.0.30703 + 2.0 + {066DA097-92CE-4E8D-BE88-8C875AFDC204} + Library + Properties + PlanktonFold + PlanktonFold + v4.5 + 512 + false + + + + true + full + false + bin\ + DEBUG;TRACE + prompt + 4 + + + true + full + false + ..\..\..\..\..\AppData\Roaming\Grasshopper\Libraries\ + DEBUG;TRACE + prompt + false + + + pdbonly + true + bin\ + TRACE + prompt + 4 + false + + + + ..\..\..\..\..\..\..\Program Files\Common Files\McNeel\Rhinoceros\5.0\Plug-ins\Grasshopper (b45a29b1-4343-4035-989e-044e8580d9cf)\0.9.76.0\GH_IO.dll + + + ..\..\..\..\..\..\..\Program Files\Common Files\McNeel\Rhinoceros\5.0\Plug-ins\Grasshopper (b45a29b1-4343-4035-989e-044e8580d9cf)\0.9.76.0\Grasshopper.dll + False + + + False + ..\..\..\..\..\AppData\Roaming\Grasshopper\Libraries\KangarooLib0099.dll + + + False + ..\..\..\..\..\AppData\Roaming\Grasshopper\Libraries\KangarooSolver.dll + + + ..\packages\MathNet.Numerics.3.16.0\lib\net40\MathNet.Numerics.dll + True + + + + + + + + + False + c:\Program Files (x86)\Rhinoceros 5\System\rhinocommon.dll + False + + + + + + + + + + + + + + + + True + True + Resources.resx + + + + + + + + {9c7ea9a3-331b-4574-bf22-b9d609de2f1e} + PlanktonGh + False + + + {bdd288f7-c2e2-4c2a-b083-e4d4d21f528f} + Plankton + False + + + + + ResXFileCodeGenerator + Resources.Designer.cs + + + + + + + + + + + + Copy "$(TargetPath)" "C:\Users\dyliu\AppData\Roaming\Grasshopper\Libraries\PlanktonFold.gha" + + + + + + Program + c:\Program Files (x86)\Rhinoceros 5\System\Rhino4.exe + false + + + en-US + + + c:\Program Files\Rhinoceros 5 (64-bit)\System\Rhino.exe + + + Program + false + + \ No newline at end of file diff --git a/src/PlanktonFold/PlanktonFoldInfo.cs b/src/PlanktonFold/PlanktonFoldInfo.cs new file mode 100644 index 0000000..3f89a22 --- /dev/null +++ b/src/PlanktonFold/PlanktonFoldInfo.cs @@ -0,0 +1,57 @@ +using System; +using System.Drawing; +using Grasshopper.Kernel; + +namespace PlanktonFold +{ + public class PlanktonFoldInfo : GH_AssemblyInfo + { + public override string Name + { + get + { + return "PlanktonFold"; + } + } + public override Bitmap Icon + { + get + { + //Return a 24x24 pixel bitmap to represent this GHA library. + return null; + } + } + public override string Description + { + get + { + //Return a short string describing the purpose of this GHA library. + return ""; + } + } + public override Guid Id + { + get + { + return new Guid("582fff2e-e6b5-44d3-a4b0-7ee164fa8087"); + } + } + + public override string AuthorName + { + get + { + //Return a string identifying you or your company. + return ""; + } + } + public override string AuthorContact + { + get + { + //Return a string representing your preferred contact details. 
+ return ""; + } + } + } +} diff --git a/src/PlanktonFold/PneumaticFold.cs b/src/PlanktonFold/PneumaticFold.cs new file mode 100644 index 0000000..2a1a930 --- /dev/null +++ b/src/PlanktonFold/PneumaticFold.cs @@ -0,0 +1,71 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +using Rhino; +using Rhino.Geometry; +using Grasshopper; +using Grasshopper.Kernel.Data; + +using Plankton; +using PlanktonFold; +using PlanktonGh; + +using KangarooLib; +using KangarooSolver; + +using MathNet.Numerics.LinearAlgebra; + +namespace PlanktonFold +{ + public class PneumaticFold + { + public Mesh Pattern; + public PlanktonMesh PMesh; + + // 几何 + public List ConstraintVertices; + public List> FoldLines; + public List> SectorAngle; + public List> FoldAngle; + + // + public List> FMatrix; // should be identical matrix + + + public PneumaticFold(Mesh M) + { + // plankton mesh prepared + PMesh = RhinoSupport.ToPlanktonMesh(Pattern); + PMesh.Faces.AssignFaceIndex(); + PMesh.Halfedges.AssignHalfEdgeIndex(); + PMesh.Vertices.AssignVertexIndex(); + + // assign MV for each inner halfedge + List innerEdges + = PMesh.Halfedges.ToList().Where(o => o.AdjacentFace != -1 && PMesh.Halfedges[PMesh.Halfedges.GetPairHalfedge(o.Index)].AdjacentFace != -1).ToList(); + + foreach (PlanktonHalfedge e in innerEdges) + e.MV = RhinoSupport.MVDetermination(PMesh, e.Index); + + // constraint vertices + ConstraintVertices = RhinoSupport.GetConstraintVertices(PMesh); + List cVertexIndices = RhinoSupport.GetConstraintVertexIndices(PMesh); + + DataTree neighbourEdges = new DataTree(); + for (int j = 0; j < cVertexIndices.Count(); j++) + { + GH_Path jPth = new GH_Path(j); + neighbourEdges.AddRange(RhinoSupport.NeighbourVertexEdges(PMesh, cVertexIndices[j]) + .Select(o => RhinoSupport.HalfEdgeToLine(PMesh, o)).ToList(), jPth); + } + + } + + + + + } +} diff --git a/src/PlanktonFold/Properties/AssemblyInfo.cs b/src/PlanktonFold/Properties/AssemblyInfo.cs new file mode 100644 index 0000000..5fd9504 --- /dev/null +++ b/src/PlanktonFold/Properties/AssemblyInfo.cs @@ -0,0 +1,38 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using Rhino.PlugIns; + + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("PlanktonFold")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("PlanktonFold")] +[assembly: AssemblyCopyright("Copyright © 2016")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. 
+[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("066da097-92ce-4e8d-be88-8c875afdc204")] // This will also be the Guid of the Rhino plug-in + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/src/PlanktonFold/Properties/Resources.Designer.cs b/src/PlanktonFold/Properties/Resources.Designer.cs new file mode 100644 index 0000000..a496527 --- /dev/null +++ b/src/PlanktonFold/Properties/Resources.Designer.cs @@ -0,0 +1,83 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by a tool. +// Runtime Version:4.0.30319.42000 +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// +//------------------------------------------------------------------------------ + +namespace PlanktonFold.Properties { + using System; + + + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + internal class Resources { + + private static global::System.Resources.ResourceManager resourceMan; + + private static global::System.Globalization.CultureInfo resourceCulture; + + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + internal Resources() { + } + + /// + /// Returns the cached ResourceManager instance used by this class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { + get { + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("PlanktonFold.Properties.Resources", typeof(Resources).Assembly); + resourceMan = temp; + } + return resourceMan; + } + } + + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { + get { + return resourceCulture; + } + set { + resourceCulture = value; + } + } + + /// + /// Looks up a localized resource of type System.Drawing.Bitmap. 
+ /// + internal static System.Drawing.Bitmap mesh_analysis_Icon_06__06 { + get { + object obj = ResourceManager.GetObject("mesh_analysis_Icon_06__06", resourceCulture); + return ((System.Drawing.Bitmap)(obj)); + } + } + + /// + /// Looks up a localized resource of type System.Drawing.Bitmap. + /// + internal static System.Drawing.Bitmap pmesh_to_mesh_07 { + get { + object obj = ResourceManager.GetObject("pmesh_to_mesh_07", resourceCulture); + return ((System.Drawing.Bitmap)(obj)); + } + } + } +} diff --git a/src/PlanktonFold/Properties/Resources.resx b/src/PlanktonFold/Properties/Resources.resx new file mode 100644 index 0000000..1c7ec9f --- /dev/null +++ b/src/PlanktonFold/Properties/Resources.resx @@ -0,0 +1,127 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + text/microsoft-resx + + + 2.0 + + + System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + + + System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + + + + ..\Resources\mesh_analysis_Icon-06_-06.png;System.Drawing.Bitmap, System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a + + + ..\Resources\pmesh-to-mesh-07.png;System.Drawing.Bitmap, System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a + + \ No newline at end of file diff --git a/src/PlanktonFold/Resources/mesh_analysis_Icon-06_-06.png b/src/PlanktonFold/Resources/mesh_analysis_Icon-06_-06.png new file mode 100644 index 0000000..a793d48 Binary files /dev/null and b/src/PlanktonFold/Resources/mesh_analysis_Icon-06_-06.png differ diff --git a/src/PlanktonFold/Resources/pmesh-to-mesh-07.png b/src/PlanktonFold/Resources/pmesh-to-mesh-07.png new file mode 100644 index 0000000..b28b184 Binary files /dev/null and b/src/PlanktonFold/Resources/pmesh-to-mesh-07.png differ diff --git a/src/PlanktonGh/DecomposePlankton.cs b/src/PlanktonGh/DecomposePlankton.cs index 021fa9c..a84613d 100644 --- a/src/PlanktonGh/DecomposePlankton.cs +++ b/src/PlanktonGh/DecomposePlankton.cs @@ -1,118 +1,117 @@ -using System; -using System.Collections.Generic; - -using Grasshopper.Kernel; -using Rhino.Geometry; -using Plankton; - -namespace PlanktonGh -{ - public class DecomposePlankton : GH_Component - { - /// - /// Initializes a new instance of the DecomposePlankton class. - /// - public DecomposePlankton() - : base("DeconstructPlankton", "DeconstructPlankton", - "Decompose a plankton mesh into its topology information", - "Mesh", "Triangulation") - { - } - - /// - /// Registers all the input parameters for this component. - /// - protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) - { - pManager.AddParameter(new GH_PlanktonMeshParam(), "PMesh", "PMesh", "The input PlanktonMesh to decompose", GH_ParamAccess.item); - } - - /// - /// Registers all the output parameters for this component. 
- /// - protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) - { - pManager.Register_PointParam("Vertex_Points", "V", "Vertex point positions", GH_ParamAccess.list); - pManager.Register_IntegerParam("Vertex_Outgoing_Halfedge", "V_He", "One of the outgoing halfedges for each vertex", GH_ParamAccess.list); - pManager.Register_IntegerParam("Halfedge_StartVertex", "He_V", "The starting vertex of each halfedge", GH_ParamAccess.list); - pManager.Register_IntegerParam("Halfedge_AdjacentFace", "He_F", "The face bordered by each halfedge (or -1 if it is adjacent to a boundary)", GH_ParamAccess.list); - pManager.Register_IntegerParam("Halfedge_NextHalfedge", "He_Nxt", "The next halfedge around the same face", GH_ParamAccess.list); - pManager.Register_IntegerParam("Halfedge_PrevHalfedge", "He_Prv", "The previous halfedge around the same face", GH_ParamAccess.list); - pManager.Register_IntegerParam("Halfedge_Pair", "He_P", "The halfedge joining the same 2 vertices in the opposite direction", GH_ParamAccess.list); - pManager.Register_IntegerParam("Face_Halfedge", "F_He", "The first halfedge of each face", GH_ParamAccess.list); - } - - /// - /// This is the method that actually does the work. - /// - /// The DA object is used to retrieve from inputs and store in outputs. - protected override void SolveInstance(IGH_DataAccess DA) - { - PlanktonMesh P = null; - if (!DA.GetData(0, ref P)) return; - - List Positions = new List(); - List OutHEdge = new List(); - - foreach (PlanktonVertex v in P.Vertices) - { - Positions.Add(new Point3f(v.X, v.Y, v.Z)); - OutHEdge.Add(v.OutgoingHalfedge); - } - - List StartV = new List(); - List AdjF = new List(); - List Next = new List(); - List Prev = new List(); - List Pair = new List(); - - for (int i = 0; i < P.Halfedges.Count; i++) - { - StartV.Add(P.Halfedges[i].StartVertex); - AdjF.Add(P.Halfedges[i].AdjacentFace); - Next.Add(P.Halfedges[i].NextHalfedge); - Prev.Add(P.Halfedges[i].PrevHalfedge); - Pair.Add(P.Halfedges.GetPairHalfedge(i)); - } - - List FaceEdge = new List(); - for (int i = 0; i < P.Faces.Count; i++) - { - FaceEdge.Add(P.Faces[i].FirstHalfedge); - } - - DA.SetDataList(0, Positions); - DA.SetDataList(1, OutHEdge); - - DA.SetDataList(2, StartV); - DA.SetDataList(3, AdjF); - DA.SetDataList(4, Next); - DA.SetDataList(5, Prev); - DA.SetDataList(6, Pair); - - DA.SetDataList(7, FaceEdge); - - } - - /// - /// Provides an Icon for the component. - /// - protected override System.Drawing.Bitmap Icon - { - get - { - //You can add image files to your project resources and access them like this: - // return Resources.IconForThisComponent; - return PlanktonGh.Properties.Resources.plankton_decon; - } - } - - /// - /// Gets the unique ID for this component. Do not change this ID after release. - /// - public override Guid ComponentGuid - { - get { return new Guid("{97c28a7c-5d5a-4b3d-a935-b8730e88749b}"); } - } - } +using System; +using System.Collections.Generic; +using Grasshopper.Kernel; +using Rhino.Geometry; +using Plankton; + +namespace PlanktonGh +{ + public class DecomposePlankton : GH_Component + { + /// + /// Initializes a new instance of the DecomposePlankton class. + /// + public DecomposePlankton() + : base("DeconstructPlankton", "DeconstructPlankton", + "Decompose a plankton mesh into its topology information", + "Mesh", "Triangulation") + { + } + + /// + /// Registers all the input parameters for this component. 
+ /// + protected override void RegisterInputParams(GH_Component.GH_InputParamManager pManager) + { + pManager.AddParameter(new GH_PlanktonMeshParam(), "PMesh", "PMesh", "The input PlanktonMesh to decompose", GH_ParamAccess.item); + } + + /// + /// Registers all the output parameters for this component. + /// + protected override void RegisterOutputParams(GH_Component.GH_OutputParamManager pManager) + { + pManager.Register_PointParam("Vertex_Points", "V", "Vertex point positions", GH_ParamAccess.list); + pManager.Register_IntegerParam("Vertex_Outgoing_Halfedge", "V_He", "One of the outgoing halfedges for each vertex", GH_ParamAccess.list); + pManager.Register_IntegerParam("Halfedge_StartVertex", "He_V", "The starting vertex of each halfedge", GH_ParamAccess.list); + pManager.Register_IntegerParam("Halfedge_AdjacentFace", "He_F", "The face bordered by each halfedge (or -1 if it is adjacent to a boundary)", GH_ParamAccess.list); + pManager.Register_IntegerParam("Halfedge_NextHalfedge", "He_Nxt", "The next halfedge around the same face", GH_ParamAccess.list); + pManager.Register_IntegerParam("Halfedge_PrevHalfedge", "He_Prv", "The previous halfedge around the same face", GH_ParamAccess.list); + pManager.Register_IntegerParam("Halfedge_Pair", "He_P", "The halfedge joining the same 2 vertices in the opposite direction", GH_ParamAccess.list); + pManager.Register_IntegerParam("Face_Halfedge", "F_He", "The first halfedge of each face", GH_ParamAccess.list); + } + + /// + /// This is the method that actually does the work. + /// + /// The DA object is used to retrieve from inputs and store in outputs. + protected override void SolveInstance(IGH_DataAccess DA) + { + PlanktonMesh P = null; + if (!DA.GetData(0, ref P)) return; + + List Positions = new List(); + List OutHEdge = new List(); + + foreach (PlanktonVertex v in P.Vertices) + { + Positions.Add(new Point3f(v.X, v.Y, v.Z)); + OutHEdge.Add(v.OutgoingHalfedge); + } + + List StartV = new List(); + List AdjF = new List(); + List Next = new List(); + List Prev = new List(); + List Pair = new List(); + + for (int i = 0; i < P.Halfedges.Count; i++) + { + StartV.Add(P.Halfedges[i].StartVertex); + AdjF.Add(P.Halfedges[i].AdjacentFace); + Next.Add(P.Halfedges[i].NextHalfedge); + Prev.Add(P.Halfedges[i].PrevHalfedge); + Pair.Add(P.Halfedges.GetPairHalfedge(i)); + } + + List FaceEdge = new List(); + for (int i = 0; i < P.Faces.Count; i++) + { + FaceEdge.Add(P.Faces[i].FirstHalfedge); + } + + DA.SetDataList(0, Positions); + DA.SetDataList(1, OutHEdge); + + DA.SetDataList(2, StartV); + DA.SetDataList(3, AdjF); + DA.SetDataList(4, Next); + DA.SetDataList(5, Prev); + DA.SetDataList(6, Pair); + + DA.SetDataList(7, FaceEdge); + + } + + /// + /// Provides an Icon for the component. + /// + protected override System.Drawing.Bitmap Icon + { + get + { + //You can add image files to your project resources and access them like this: + // return Resources.IconForThisComponent; + return PlanktonGh.Properties.Resources.plankton_decon; + } + } + + /// + /// Gets the unique ID for this component. Do not change this ID after release. 
+ /// + public override Guid ComponentGuid + { + get { return new Guid("{97c28a7c-5d5a-4b3d-a935-b8730e88749b}"); } + } + } } \ No newline at end of file diff --git a/src/PlanktonGh/PlanktonGh.csproj b/src/PlanktonGh/PlanktonGh.csproj index 80b9d8b..57f5ace 100644 --- a/src/PlanktonGh/PlanktonGh.csproj +++ b/src/PlanktonGh/PlanktonGh.csproj @@ -1,108 +1,120 @@ - - - - Debug - AnyCPU - 8.0.30703 - 2.0 - {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E} - Library - Properties - PlanktonGh - PlanktonGh - v4.0 - 512 - False - OnBuildSuccess - - - true - full - false - ..\..\bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - pdbonly - true - ..\..\bin\Release\ - TRACE - prompt - 4 - ..\..\bin\Release\PlanktonGh.xml - - - - ..\..\lib\GH_IO.dll - False - - - ..\..\lib\Grasshopper.dll - False - - - ..\..\lib\RhinoCommon.dll - False - - - - - - - - - - - - - - - - - True - True - Resources.resx - - - - - - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F} - Plankton - False - - - - - ResXFileCodeGenerator - Resources.Designer.cs - - - - - - - - - - - - - - - Copy "$(TargetPath)" "$(TargetDir)$(SolutionName).gha" - - - cp "$(TargetPath)" "$(TargetDir)$(SolutionName).gha" - - - - + + + + Debug + AnyCPU + 8.0.30703 + 2.0 + {9C7EA9A3-331B-4574-BF22-B9D609DE2F1E} + Library + Properties + PlanktonGh + PlanktonGh + v4.5 + 512 + OnBuildSuccess + + + + true + full + false + ..\..\..\..\..\AppData\Roaming\Grasshopper\Libraries\ + DEBUG;TRACE + prompt + 4 + false + + + pdbonly + true + ..\..\bin\Release\ + TRACE + prompt + 4 + ..\..\bin\Release\PlanktonGh.xml + false + + + + ..\..\..\..\..\..\..\Program Files\Common Files\McNeel\Rhinoceros\5.0\Plug-ins\Grasshopper (b45a29b1-4343-4035-989e-044e8580d9cf)\0.9.76.0\GH_IO.dll + False + + + ..\..\..\..\..\..\..\Program Files\Common Files\McNeel\Rhinoceros\5.0\Plug-ins\Grasshopper (b45a29b1-4343-4035-989e-044e8580d9cf)\0.9.76.0\Grasshopper.dll + False + + + ..\packages\MathNet.Numerics.3.16.0\lib\net40\MathNet.Numerics.dll + True + + + ..\..\..\..\..\..\..\Program Files\Rhinoceros 5 (64-bit)\System\RhinoCommon.dll + False + + + + + + + + + + + + + + + + + + True + True + Resources.resx + + + + + + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F} + Plankton + False + + + + + ResXFileCodeGenerator + Resources.Designer.cs + + + + + + + + + + + + + + + + + Copy "$(TargetPath)" "$(TargetDir)$(SolutionName).gha" + + + cp "$(TargetPath)" "$(TargetDir)$(SolutionName).gha" + + + + Copy "$(TargetPath)" "C:\Users\dyliu\AppData\Roaming\Grasshopper\Libraries\PlanktonGh.gha" + + + \ No newline at end of file diff --git a/src/PlanktonGh/PlanktonGhDiagram.cd b/src/PlanktonGh/PlanktonGhDiagram.cd new file mode 100644 index 0000000..e0b202f --- /dev/null +++ b/src/PlanktonGh/PlanktonGhDiagram.cd @@ -0,0 +1,11 @@ + + + + + + QCIAAAAAECAAAAAQAAACACAAAAAACAgkCAAAACAAAAA= + RhinoSupport.cs + + + + \ No newline at end of file diff --git a/src/PlanktonGh/Properties/Resources.Designer.cs b/src/PlanktonGh/Properties/Resources.Designer.cs index feb7c38..e8c0f6d 100644 --- a/src/PlanktonGh/Properties/Resources.Designer.cs +++ b/src/PlanktonGh/Properties/Resources.Designer.cs @@ -1,103 +1,103 @@ -//------------------------------------------------------------------------------ -// -// This code was generated by a tool. -// Runtime Version:4.0.30319.18444 -// -// Changes to this file may cause incorrect behavior and will be lost if -// the code is regenerated. 
-// -//------------------------------------------------------------------------------ - -namespace PlanktonGh.Properties { - using System; - - - /// - /// A strongly-typed resource class, for looking up localized strings, etc. - /// - // This class was auto-generated by the StronglyTypedResourceBuilder - // class via a tool like ResGen or Visual Studio. - // To add or remove a member, edit your .ResX file then rerun ResGen - // with the /str option, or rebuild your VS project. - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] - [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] - internal class Resources { - - private static global::System.Resources.ResourceManager resourceMan; - - private static global::System.Globalization.CultureInfo resourceCulture; - - [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] - internal Resources() { - } - - /// - /// Returns the cached ResourceManager instance used by this class. - /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Resources.ResourceManager ResourceManager { - get { - if (object.ReferenceEquals(resourceMan, null)) { - global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("PlanktonGh.Properties.Resources", typeof(Resources).Assembly); - resourceMan = temp; - } - return resourceMan; - } - } - - /// - /// Overrides the current thread's CurrentUICulture property for all - /// resource lookups using this strongly typed resource class. - /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Globalization.CultureInfo Culture { - get { - return resourceCulture; - } - set { - resourceCulture = value; - } - } - - /// - /// Looks up a localized resource of type System.Drawing.Bitmap. - /// - internal static System.Drawing.Bitmap plankton { - get { - object obj = ResourceManager.GetObject("plankton", resourceCulture); - return ((System.Drawing.Bitmap)(obj)); - } - } - - /// - /// Looks up a localized resource of type System.Drawing.Bitmap. - /// - internal static System.Drawing.Bitmap plankton_decon { - get { - object obj = ResourceManager.GetObject("plankton_decon", resourceCulture); - return ((System.Drawing.Bitmap)(obj)); - } - } - - /// - /// Looks up a localized resource of type System.Drawing.Bitmap. - /// - internal static System.Drawing.Bitmap plankton_param { - get { - object obj = ResourceManager.GetObject("plankton_param", resourceCulture); - return ((System.Drawing.Bitmap)(obj)); - } - } - - /// - /// Looks up a localized resource of type System.Drawing.Bitmap. - /// - internal static System.Drawing.Bitmap plankton_verts { - get { - object obj = ResourceManager.GetObject("plankton_verts", resourceCulture); - return ((System.Drawing.Bitmap)(obj)); - } - } - } -} +//------------------------------------------------------------------------------ +// +// This code was generated by a tool. +// Runtime Version:4.0.30319.42000 +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + +namespace PlanktonGh.Properties { + using System; + + + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + internal class Resources { + + private static global::System.Resources.ResourceManager resourceMan; + + private static global::System.Globalization.CultureInfo resourceCulture; + + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + internal Resources() { + } + + /// + /// Returns the cached ResourceManager instance used by this class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { + get { + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("PlanktonGh.Properties.Resources", typeof(Resources).Assembly); + resourceMan = temp; + } + return resourceMan; + } + } + + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { + get { + return resourceCulture; + } + set { + resourceCulture = value; + } + } + + /// + /// Looks up a localized resource of type System.Drawing.Bitmap. + /// + internal static System.Drawing.Bitmap plankton { + get { + object obj = ResourceManager.GetObject("plankton", resourceCulture); + return ((System.Drawing.Bitmap)(obj)); + } + } + + /// + /// Looks up a localized resource of type System.Drawing.Bitmap. + /// + internal static System.Drawing.Bitmap plankton_decon { + get { + object obj = ResourceManager.GetObject("plankton_decon", resourceCulture); + return ((System.Drawing.Bitmap)(obj)); + } + } + + /// + /// Looks up a localized resource of type System.Drawing.Bitmap. + /// + internal static System.Drawing.Bitmap plankton_param { + get { + object obj = ResourceManager.GetObject("plankton_param", resourceCulture); + return ((System.Drawing.Bitmap)(obj)); + } + } + + /// + /// Looks up a localized resource of type System.Drawing.Bitmap. 
+ /// + internal static System.Drawing.Bitmap plankton_verts { + get { + object obj = ResourceManager.GetObject("plankton_verts", resourceCulture); + return ((System.Drawing.Bitmap)(obj)); + } + } + } +} diff --git a/src/PlanktonGh/RhinoSupport.cs b/src/PlanktonGh/RhinoSupport.cs index 3248796..2c1ab51 100644 --- a/src/PlanktonGh/RhinoSupport.cs +++ b/src/PlanktonGh/RhinoSupport.cs @@ -1,417 +1,1541 @@ -using Plankton; -using Rhino.Geometry; -using System; -using System.Collections.Generic; -using System.Linq; - -namespace PlanktonGh -{ - /// - /// Provides static and extension methods to add support for Rhino geometry in . - /// - public static class RhinoSupport - { - public static string HelloWorld() - { - return "Hello World!"; - } - - /// - /// Creates a Plankton halfedge mesh from a Rhino mesh. - /// Uses the topology of the Rhino mesh directly. - /// - /// A which represents the topology and geometry of the source mesh. - /// A Rhino mesh to convert from. - public static PlanktonMesh ToPlanktonMesh(this Mesh source) - { - PlanktonMesh pMesh = new PlanktonMesh(); - - source.Vertices.CombineIdentical(true, true); - source.Vertices.CullUnused(); - source.UnifyNormals(); - source.Weld(Math.PI); - - foreach (Point3f v in source.TopologyVertices) - { - pMesh.Vertices.Add(v.X, v.Y, v.Z); - } - - for (int i = 0; i < source.Faces.Count; i++) - { - pMesh.Faces.Add(new PlanktonFace()); - } - - for (int i = 0; i < source.TopologyEdges.Count; i++) - { - PlanktonHalfedge HalfA = new PlanktonHalfedge(); - - HalfA.StartVertex = source.TopologyEdges.GetTopologyVertices(i).I; - - if (pMesh.Vertices [HalfA.StartVertex].OutgoingHalfedge == -1) { - pMesh.Vertices [HalfA.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count; - } - - PlanktonHalfedge HalfB = new PlanktonHalfedge(); - - HalfB.StartVertex = source.TopologyEdges.GetTopologyVertices(i).J; - - if (pMesh.Vertices [HalfB.StartVertex].OutgoingHalfedge == -1) { - pMesh.Vertices [HalfB.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count + 1; - } - - bool[] Match; - int[] ConnectedFaces = source.TopologyEdges.GetConnectedFaces(i, out Match); - - //Note for Steve Baer : This Match bool doesn't seem to work on triangulated meshes - it often returns true - //for both faces, even for a properly oriented manifold mesh, which can't be right - //So - making our own check for matching: - //(I suspect the problem is related to C being the same as D for triangles, so best to - //deal with them separately just to make sure) - //loop through the vertices of the face until finding the one which is the same as the start of the edge - //iff the next vertex around the face is the end of the edge then it matches. 
- - Match[0] = false; - if (Match.Length > 1) - {Match[1] = true;} - - int VertA = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].A); - int VertB = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].B); - int VertC = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].C); - int VertD = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].D); - - if ((VertA == source.TopologyEdges.GetTopologyVertices(i).I) - && (VertB == source.TopologyEdges.GetTopologyVertices(i).J)) - { Match[0] = true; - } - if ((VertB == source.TopologyEdges.GetTopologyVertices(i).I) - && (VertC == source.TopologyEdges.GetTopologyVertices(i).J)) - { - Match[0] = true; - } - if ((VertC == source.TopologyEdges.GetTopologyVertices(i).I) - && (VertD == source.TopologyEdges.GetTopologyVertices(i).J)) - { - Match[0] = true; - } - if ((VertD == source.TopologyEdges.GetTopologyVertices(i).I) - && (VertA == source.TopologyEdges.GetTopologyVertices(i).J)) - { - Match[0] = true; - } - //I don't think these next 2 should ever be needed, but just in case: - if ((VertC == source.TopologyEdges.GetTopologyVertices(i).I) - && (VertA == source.TopologyEdges.GetTopologyVertices(i).J)) - { - Match[0] = true; - } - if ((VertB == source.TopologyEdges.GetTopologyVertices(i).I) - && (VertD == source.TopologyEdges.GetTopologyVertices(i).J)) - { - Match[0] = true; - } - - if (Match[0] == true) - { - HalfA.AdjacentFace = ConnectedFaces[0]; - if (pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge == -1) { - pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count; - } - if (ConnectedFaces.Length > 1) - { - HalfB.AdjacentFace = ConnectedFaces[1]; - if (pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge == -1) { - pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count + 1; - } - } - else - { - HalfB.AdjacentFace = -1; - pMesh.Vertices[HalfB.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count + 1; - } - } - else - { - HalfB.AdjacentFace = ConnectedFaces[0]; - - if (pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge == -1) { - pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count + 1; - } - - if (ConnectedFaces.Length > 1) - { - HalfA.AdjacentFace = ConnectedFaces[1]; - - if (pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge == -1) { - pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count; - } - } - else - { - HalfA.AdjacentFace = -1; - pMesh.Vertices[HalfA.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count; - } - } - pMesh.Halfedges.Add(HalfA); - pMesh.Halfedges.Add(HalfB); - } - - for (int i = 0; i < (pMesh.Halfedges.Count); i += 2) - { - int[] EndNeighbours = source.TopologyVertices.ConnectedTopologyVertices(pMesh.Halfedges[i + 1].StartVertex, true); - for (int j = 0; j < EndNeighbours.Length; j++) - { - if(EndNeighbours[j] == pMesh.Halfedges[i].StartVertex) - { - int EndOfNextHalfedge = EndNeighbours[(j - 1 + EndNeighbours.Length) % EndNeighbours.Length]; - int StartOfPrevOfPairHalfedge = EndNeighbours[(j + 1) % EndNeighbours.Length]; - - int NextEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i + 1].StartVertex,EndOfNextHalfedge); - int PrevPairEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i + 1].StartVertex,StartOfPrevOfPairHalfedge); - - if (source.TopologyEdges.GetTopologyVertices(NextEdge).I == pMesh.Halfedges[i + 1].StartVertex) { - pMesh.Halfedges[i].NextHalfedge = NextEdge * 2; - } else { - pMesh.Halfedges[i].NextHalfedge = NextEdge * 2 + 1; - } - - if 
(source.TopologyEdges.GetTopologyVertices(PrevPairEdge).J == pMesh.Halfedges[i + 1].StartVertex) { - pMesh.Halfedges[i + 1].PrevHalfedge = PrevPairEdge * 2; - } else { - pMesh.Halfedges[i + 1].PrevHalfedge = PrevPairEdge * 2+1; - } - break; - } - } - - int[] StartNeighbours = source.TopologyVertices.ConnectedTopologyVertices(pMesh.Halfedges[i].StartVertex, true); - for (int j = 0; j < StartNeighbours.Length; j++) - { - if (StartNeighbours[j] == pMesh.Halfedges[i+1].StartVertex) - { - int EndOfNextOfPairHalfedge = StartNeighbours[(j - 1 + StartNeighbours.Length) % StartNeighbours.Length]; - int StartOfPrevHalfedge = StartNeighbours[(j + 1) % StartNeighbours.Length]; - - int NextPairEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i].StartVertex, EndOfNextOfPairHalfedge); - int PrevEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i].StartVertex, StartOfPrevHalfedge); - - if (source.TopologyEdges.GetTopologyVertices(NextPairEdge).I == pMesh.Halfedges[i].StartVertex) { - pMesh.Halfedges[i + 1].NextHalfedge = NextPairEdge * 2; - } else { - pMesh.Halfedges[i + 1].NextHalfedge = NextPairEdge * 2 + 1; - } - - if (source.TopologyEdges.GetTopologyVertices(PrevEdge).J == pMesh.Halfedges[i].StartVertex) { - pMesh.Halfedges[i].PrevHalfedge = PrevEdge * 2; - } else { - pMesh.Halfedges[i].PrevHalfedge = PrevEdge * 2 + 1; - } - break; - } - } - } - - return pMesh; - } - - /// - /// Creates a Rhino mesh from a Plankton halfedge mesh. - /// Uses the face-vertex information available in the halfedge data structure. - /// - /// A which represents the source mesh (as best it can). - /// A Plankton mesh to convert from. - /// Any faces with five sides or more will be triangulated. - public static Mesh ToRhinoMesh(this PlanktonMesh source) - { - // could add different options for triangulating ngons later - Mesh rMesh = new Mesh(); - foreach (PlanktonVertex v in source.Vertices) - { - rMesh.Vertices.Add(v.X, v.Y, v.Z); - } - for (int i = 0; i < source.Faces.Count; i++) - { - int[] fvs = source.Faces.GetFaceVertices(i); - if (fvs.Length == 3) - { - rMesh.Faces.AddFace(fvs[0], fvs[1], fvs[2]); - } - else if (fvs.Length == 4) - { - rMesh.Faces.AddFace(fvs[0], fvs[1], fvs[2], fvs[3]); - } - else if (fvs.Length > 4) - { - // triangulate about face center (fan) - var fc = source.Faces.GetFaceCenter(i); - rMesh.Vertices.Add(fc.X, fc.Y, fc.Z); - for (int j = 0; j < fvs.Length; j++) - { - rMesh.Faces.AddFace(fvs[j], fvs[(j + 1) % fvs.Length], rMesh.Vertices.Count - 1); - } - } - } - rMesh.Normals.ComputeNormals(); - return rMesh; - } - - /// - /// Replaces the vertices of a PlanktonMesh with a new list of points - /// - /// A list of closed polylines representing the boundary edges of each face. - /// A Plankton mesh. - /// A list of points. - public static PlanktonMesh ReplaceVertices(this PlanktonMesh source, List points) - { - PlanktonMesh pMesh = source; - for (int i = 0; i < points.Count; i++) - { - pMesh.Vertices.SetVertex(i, points[i]); - } - return pMesh; - } - - /// - /// Converts each face to a closed polyline. - /// - /// A list of closed polylines representing the boundary edges of each face. - /// A Plankton mesh. 
- public static Polyline[] ToPolylines(this PlanktonMesh source) - { - int n = source.Faces.Count; - Polyline[] polylines = new Polyline[n]; - for (int i = 0; i < n; i++) - { - Polyline facePoly = new Polyline(); - int[] vs = source.Faces.GetFaceVertices(i); - for (int j = 0; j <= vs.Length; j++) - { - var v = source.Vertices[vs[j % vs.Length]]; - facePoly.Add(v.X, v.Y, v.Z); - } - polylines[i] = facePoly; - } - - return polylines; - } - - /// - /// Creates a Rhino Point3f from a Plankton vertex. - /// - /// A Plankton vertex - /// A Point3f with the same coordinates as the vertex. - public static Point3f ToPoint3f(this PlanktonVertex vertex) - { - return new Point3f(vertex.X, vertex.Y, vertex.Z); - } - - /// - /// Creates a Rhino Point3d from a Plankton vertex. - /// - /// A Plankton vertex - /// A Point3d with the same coordinates as the vertex. - public static Point3d ToPoint3d(this PlanktonVertex vertex) - { - return new Point3d(vertex.X, vertex.Y, vertex.Z); - } - - /// - /// Creates a Rhino Point3f from a Plankton vector. - /// - /// A Plankton vector. - /// A Point3f with the same XYZ components as the vector. - public static Point3f ToPoint3f(this PlanktonXYZ vector) - { - return new Point3f(vector.X, vector.Y, vector.Z); - } - - /// - /// Creates a Rhino Point3d from a Plankton vector. - /// - /// A Plankton vector. - /// A Point3d with the same XYZ components as the vector. - public static Point3d ToPoint3d(this PlanktonXYZ vector) - { - return new Point3d(vector.X, vector.Y, vector.Z); - } - - /// - /// Creates a Rhino Vector3f from a Plankton vector. - /// - /// A Plankton vector. - /// A Vector3f with the same XYZ components as the vector. - public static Vector3f ToVector3f(this PlanktonXYZ vector) - { - return new Vector3f(vector.X, vector.Y, vector.Z); - } - - /// - /// Sets or adds a vertex to the Vertex List. - /// If [index] is less than [Count], the existing vertex at [index] will be modified. - /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. - /// If [index] is larger than [Count], the function will return false. - /// - /// Index of vertex to set. - /// Vertex location. - /// true on success, false on failure. - public static bool SetVertex(this PlanktonVertexList vertexList, int index, Point3f vertex) - { - return vertexList.SetVertex(index, vertex.X, vertex.Y, vertex.Z); - } - - /// - /// Sets or adds a vertex to the Vertex List. - /// If [index] is less than [Count], the existing vertex at [index] will be modified. - /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. - /// If [index] is larger than [Count], the function will return false. - /// - /// Index of vertex to set. - /// Vertex location. - /// true on success, false on failure. - public static bool SetVertex(this PlanktonVertexList vertexList, int index, Point3d vertex) - { - return vertexList.SetVertex(index, vertex.X, vertex.Y, vertex.Z); - } - - /// - /// Moves a vertex by a vector. - /// - /// Index of vertex to move. - /// Vector to move by. - /// true on success, false on failure. - public static bool MoveVertex(this PlanktonVertexList vertexList, int index, Vector3d vector) - { - return vertexList.SetVertex(index, vertexList[index].X + vector.X, vertexList[index].Y + vector.Y, vertexList[index].Z + vector.Z); - } - - /// - /// Adds a new vertex to the end of the Vertex list. - /// - /// Location of new vertex. - /// The index of the newly added vertex. 
- public static int Add(this PlanktonVertexList vertexList, Point3f vertex) - { - return vertexList.Add(vertex.X, vertex.Y, vertex.Z); - } - - /// - /// Adds a new vertex to the end of the Vertex list. - /// - /// Location of new vertex. - /// The index of the newly added vertex. - public static int Add(this PlanktonVertexList vertexList, Point3d vertex) - { - return vertexList.Add(vertex.X, vertex.Y, vertex.Z); - } - - /// - /// Gets positions of vertices - /// - /// A list of Point3d - /// A Plankton mesh. - public static IEnumerable GetPositions(this PlanktonMesh source) - { - return Enumerable.Range(0, source.Vertices.Count).Select(i => source.Vertices[i].ToPoint3d()); - } - } -} - +using Plankton; +using Rhino.Geometry; +using System; +using System.Collections.Generic; +using System.Linq; +using Grasshopper.Kernel.Types; +using MathNet.Numerics.LinearAlgebra; + +namespace PlanktonGh +{ + /// + /// Provides static and extension methods to add support for Rhino geometry in . + /// + static public class RhinoSupport + { + public static string HelloWorld() + { + return "Hello World!"; + } + + /// + /// Creates a Plankton halfedge mesh from a Rhino mesh. + /// Uses the topology of the Rhino mesh directly. + /// + /// A which represents the topology and geometry of the source mesh. + /// A Rhino mesh to convert from. + public static PlanktonMesh ToPlanktonMesh(this Mesh source) + { + PlanktonMesh pMesh = new PlanktonMesh(); + + source.Vertices.CombineIdentical(true, true); + source.Vertices.CullUnused(); + source.UnifyNormals(); + source.Weld(Math.PI); + + foreach (Point3f v in source.TopologyVertices) + { + pMesh.Vertices.Add(v.X, v.Y, v.Z); + } + + for (int i = 0; i < source.Faces.Count; i++) + { + pMesh.Faces.Add(new PlanktonFace()); + } + + for (int i = 0; i < source.TopologyEdges.Count; i++) + { + PlanktonHalfedge HalfA = new PlanktonHalfedge(); + + HalfA.StartVertex = source.TopologyEdges.GetTopologyVertices(i).I; + + if (pMesh.Vertices [HalfA.StartVertex].OutgoingHalfedge == -1) { + pMesh.Vertices [HalfA.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count; + } + + PlanktonHalfedge HalfB = new PlanktonHalfedge(); + + HalfB.StartVertex = source.TopologyEdges.GetTopologyVertices(i).J; + + if (pMesh.Vertices [HalfB.StartVertex].OutgoingHalfedge == -1) { + pMesh.Vertices [HalfB.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count + 1; + } + + bool[] Match; + int[] ConnectedFaces = source.TopologyEdges.GetConnectedFaces(i, out Match); + + //Note for Steve Baer : This Match bool doesn't seem to work on triangulated meshes - it often returns true + //for both faces, even for a properly oriented manifold mesh, which can't be right + //So - making our own check for matching: + //(I suspect the problem is related to C being the same as D for triangles, so best to + //deal with them separately just to make sure) + //loop through the vertices of the face until finding the one which is the same as the start of the edge + //if the next vertex around the face is the end of the edge then it matches. 
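// --- Editor's note: illustrative sketch, not part of this commit ---
// The unrolled VertA..VertD comparisons below implement exactly the rule stated in the
// comment above: halfedge (I, J) runs with the face's winding iff J immediately follows I
// in the face's vertex cycle. A compact, hypothetical equivalent (names are the editor's
// own), using only calls already present in this method:
static bool EdgeFollowsFaceWinding(Mesh m, int face, int vertI, int vertJ)
{
    MeshFace f = m.Faces[face];
    int[] corners = f.IsTriangle
        ? new[] { f.A, f.B, f.C }
        : new[] { f.A, f.B, f.C, f.D };

    // map mesh-vertex indices to topology-vertex indices, as the code below does
    for (int k = 0; k < corners.Length; k++)
        corners[k] = m.TopologyVertices.TopologyVertexIndex(corners[k]);

    for (int k = 0; k < corners.Length; k++)
        if (corners[k] == vertI && corners[(k + 1) % corners.Length] == vertJ)
            return true; // J follows I around the face, so the halfedge matches the winding
    return false;
}
// --- end of editor's sketch ---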
+ + Match[0] = false; + if (Match.Length > 1) + {Match[1] = true;} + + int VertA = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].A); + int VertB = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].B); + int VertC = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].C); + int VertD = source.TopologyVertices.TopologyVertexIndex(source.Faces[ConnectedFaces[0]].D); + + if ((VertA == source.TopologyEdges.GetTopologyVertices(i).I) + && (VertB == source.TopologyEdges.GetTopologyVertices(i).J)) + { Match[0] = true; + } + if ((VertB == source.TopologyEdges.GetTopologyVertices(i).I) + && (VertC == source.TopologyEdges.GetTopologyVertices(i).J)) + { + Match[0] = true; + } + if ((VertC == source.TopologyEdges.GetTopologyVertices(i).I) + && (VertD == source.TopologyEdges.GetTopologyVertices(i).J)) + { + Match[0] = true; + } + if ((VertD == source.TopologyEdges.GetTopologyVertices(i).I) + && (VertA == source.TopologyEdges.GetTopologyVertices(i).J)) + { + Match[0] = true; + } + //I don't think these next 2 should ever be needed, but just in case: + if ((VertC == source.TopologyEdges.GetTopologyVertices(i).I) + && (VertA == source.TopologyEdges.GetTopologyVertices(i).J)) + { + Match[0] = true; + } + if ((VertB == source.TopologyEdges.GetTopologyVertices(i).I) + && (VertD == source.TopologyEdges.GetTopologyVertices(i).J)) + { + Match[0] = true; + } + + if (Match[0] == true) + { + HalfA.AdjacentFace = ConnectedFaces[0]; + if (pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge == -1) { + pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count; + } + if (ConnectedFaces.Length > 1) + { + HalfB.AdjacentFace = ConnectedFaces[1]; + if (pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge == -1) { + pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count + 1; + } + } + else + { + HalfB.AdjacentFace = -1; + pMesh.Vertices[HalfB.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count + 1; + } + } + else + { + HalfB.AdjacentFace = ConnectedFaces[0]; + + if (pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge == -1) { + pMesh.Faces[HalfB.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count + 1; + } + + if (ConnectedFaces.Length > 1) + { + HalfA.AdjacentFace = ConnectedFaces[1]; + + if (pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge == -1) { + pMesh.Faces[HalfA.AdjacentFace].FirstHalfedge = pMesh.Halfedges.Count; + } + } + else + { + HalfA.AdjacentFace = -1; + pMesh.Vertices[HalfA.StartVertex].OutgoingHalfedge = pMesh.Halfedges.Count; + } + } + pMesh.Halfedges.Add(HalfA); + pMesh.Halfedges.Add(HalfB); + } + + for (int i = 0; i < (pMesh.Halfedges.Count); i += 2) + { + int[] EndNeighbours = source.TopologyVertices.ConnectedTopologyVertices(pMesh.Halfedges[i + 1].StartVertex, true); + for (int j = 0; j < EndNeighbours.Length; j++) + { + if(EndNeighbours[j] == pMesh.Halfedges[i].StartVertex) + { + int EndOfNextHalfedge = EndNeighbours[(j - 1 + EndNeighbours.Length) % EndNeighbours.Length]; + int StartOfPrevOfPairHalfedge = EndNeighbours[(j + 1) % EndNeighbours.Length]; + + int NextEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i + 1].StartVertex,EndOfNextHalfedge); + int PrevPairEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i + 1].StartVertex,StartOfPrevOfPairHalfedge); + + if (source.TopologyEdges.GetTopologyVertices(NextEdge).I == pMesh.Halfedges[i + 1].StartVertex) { + pMesh.Halfedges[i].NextHalfedge = NextEdge * 2; + } else { + pMesh.Halfedges[i].NextHalfedge = NextEdge * 2 + 1; + } + + if 
(source.TopologyEdges.GetTopologyVertices(PrevPairEdge).J == pMesh.Halfedges[i + 1].StartVertex) { + pMesh.Halfedges[i + 1].PrevHalfedge = PrevPairEdge * 2; + } else { + pMesh.Halfedges[i + 1].PrevHalfedge = PrevPairEdge * 2+1; + } + break; + } + } + + int[] StartNeighbours = source.TopologyVertices.ConnectedTopologyVertices(pMesh.Halfedges[i].StartVertex, true); + for (int j = 0; j < StartNeighbours.Length; j++) + { + if (StartNeighbours[j] == pMesh.Halfedges[i+1].StartVertex) + { + int EndOfNextOfPairHalfedge = StartNeighbours[(j - 1 + StartNeighbours.Length) % StartNeighbours.Length]; + int StartOfPrevHalfedge = StartNeighbours[(j + 1) % StartNeighbours.Length]; + + int NextPairEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i].StartVertex, EndOfNextOfPairHalfedge); + int PrevEdge = source.TopologyEdges.GetEdgeIndex(pMesh.Halfedges[i].StartVertex, StartOfPrevHalfedge); + + if (source.TopologyEdges.GetTopologyVertices(NextPairEdge).I == pMesh.Halfedges[i].StartVertex) { + pMesh.Halfedges[i + 1].NextHalfedge = NextPairEdge * 2; + } else { + pMesh.Halfedges[i + 1].NextHalfedge = NextPairEdge * 2 + 1; + } + + if (source.TopologyEdges.GetTopologyVertices(PrevEdge).J == pMesh.Halfedges[i].StartVertex) { + pMesh.Halfedges[i].PrevHalfedge = PrevEdge * 2; + } else { + pMesh.Halfedges[i].PrevHalfedge = PrevEdge * 2 + 1; + } + break; + } + } + } + + pMesh.Halfedges.AssignHalfEdgeIndex(); // by dyliu + + return pMesh; + } + + /// + /// Creates a Rhino mesh from a Plankton halfedge mesh. + /// Uses the face-vertex information available in the halfedge data structure. + /// + /// A which represents the source mesh (as best it can). + /// A Plankton mesh to convert from. + /// Any faces with five sides or more will be triangulated. + public static Mesh ToRhinoMesh(this PlanktonMesh source) + { + // could add different options for triangulating ngons later + Mesh rMesh = new Mesh(); + foreach (PlanktonVertex v in source.Vertices) + { + rMesh.Vertices.Add(v.X, v.Y, v.Z); + } + for (int i = 0; i < source.Faces.Count; i++) + { + int[] fvs = source.Faces.GetFaceVertices(i); + if (fvs.Length == 3) + { + rMesh.Faces.AddFace(fvs[0], fvs[1], fvs[2]); + } + else if (fvs.Length == 4) + { + rMesh.Faces.AddFace(fvs[0], fvs[1], fvs[2], fvs[3]); + } + else if (fvs.Length > 4) + { + // triangulate about face center (fan) + var fc = source.Faces.GetFaceCenter(i); + rMesh.Vertices.Add(fc.X, fc.Y, fc.Z); + for (int j = 0; j < fvs.Length; j++) + { + rMesh.Faces.AddFace(fvs[j], fvs[(j + 1) % fvs.Length], rMesh.Vertices.Count - 1); + } + } + } + rMesh.Normals.ComputeNormals(); + return rMesh; + } + + /// + /// in GH c# component, output planktonMesh as a ObjectWrapper, its value is a what we need. 
this method cast to it's value + /// + /// + /// + public static Mesh ToRhinoMesh(GH_ObjectWrapper objWrapper) + { + // could add different options for triangulating ngons later + PlanktonMesh source = objWrapper.Value as PlanktonMesh; + Mesh rMesh = new Mesh(); + foreach (PlanktonVertex v in source.Vertices) + { + rMesh.Vertices.Add(v.X, v.Y, v.Z); + } + for (int i = 0; i < source.Faces.Count; i++) + { + int[] fvs = source.Faces.GetFaceVertices(i); + if (fvs.Length == 3) + { + rMesh.Faces.AddFace(fvs[0], fvs[1], fvs[2]); + } + else if (fvs.Length == 4) + { + rMesh.Faces.AddFace(fvs[0], fvs[1], fvs[2], fvs[3]); + } + else if (fvs.Length > 4) + { + // triangulate about face center (fan) + var fc = source.Faces.GetFaceCenter(i); + rMesh.Vertices.Add(fc.X, fc.Y, fc.Z); + for (int j = 0; j < fvs.Length; j++) + { + rMesh.Faces.AddFace(fvs[j], fvs[(j + 1) % fvs.Length], rMesh.Vertices.Count - 1); + } + } + } + rMesh.Normals.ComputeNormals(); + return rMesh; + } + + + // !!! + /// + /// Replaces the vertices of a PlanktonMesh with a new list of points + /// + /// A list of closed polylines representing the boundary edges of each face. + /// A Plankton mesh. + /// A list of points. + public static PlanktonMesh ReplaceVertices(this PlanktonMesh source, List points) + { + PlanktonMesh pMesh = source; + for (int i = 0; i < points.Count; i++) + { + pMesh.Vertices.SetVertex(i, points[i]); + } + return pMesh; + } + + + /// + /// Converts each face to a closed polyline. + /// + /// A list of closed polylines representing the boundary edges of each face. + /// A Plankton mesh. + public static Polyline[] ToPolylines(this PlanktonMesh source) + { + int n = source.Faces.Count; + Polyline[] polylines = new Polyline[n]; + for (int i = 0; i < n; i++) + { + Polyline facePoly = new Polyline(); + int[] vs = source.Faces.GetFaceVertices(i); + for (int j = 0; j <= vs.Length; j++) + { + var v = source.Vertices[vs[j % vs.Length]]; + facePoly.Add(v.X, v.Y, v.Z); + } + polylines[i] = facePoly; + } + + return polylines; + } + + public static List RhinoMeshToPolylines(Mesh mesh) + { + List polylines = new List(); + for (int i = 0; i < mesh.Faces.Count; i++) + { + List iVertexIDs = mesh.Faces.GetTopologicalVertices(i).ToList(); + Polyline iPolyline = new Polyline( + iVertexIDs.Select(o => mesh.Vertices.ToPoint3dArray().ToList()[o])); + polylines.Add(iPolyline); + + } + return polylines; + } + + /// + /// Creates a Rhino Point3f from a Plankton vertex. + /// + /// A Plankton vertex + /// A Point3f with the same coordinates as the vertex. + public static Point3f ToPoint3f(this PlanktonVertex vertex) + { + return new Point3f(vertex.X, vertex.Y, vertex.Z); + } + + /// + /// Creates a Rhino Point3d from a Plankton vertex. + /// + /// A Plankton vertex + /// A Point3d with the same coordinates as the vertex. + public static Point3d ToPoint3d(this PlanktonVertex vertex) + { + return new Point3d(vertex.X, vertex.Y, vertex.Z); + } + + /// + /// Creates a list of Rhino Point3d from a Plankton vertex list. + /// + /// + /// + public static List VertexListToPoint3d(PlanktonVertexList l) + { + return l.Select(o => RhinoSupport.ToPoint3d(o)).ToList(); + } + + public static PlanktonXYZ ToPlanktonXYZ(Point3d pt) + { + return new Plankton.PlanktonXYZ((float)pt.X, (float)pt.Y, (float)pt.Z); + } + + public static PlanktonXYZ ToPlanktonXYZ(PlanktonVertex v) + { + return new PlanktonXYZ((float)v.ToPoint3d().X, (float)v.ToPoint3d().Y, (float)v.ToPoint3d().Z); + } + /// + /// Creates a Rhino Point3f from a Plankton vector. 
+ /// + /// A Plankton vector. + /// A Point3f with the same XYZ components as the vector. + public static Point3f ToPoint3f(this PlanktonXYZ vector) + { + return new Point3f(vector.X, vector.Y, vector.Z); + } + + /// + /// Creates a Rhino Point3d from a Plankton vector. + /// + /// A Plankton vector. + /// A Point3d with the same XYZ components as the vector. + public static Point3d ToPoint3d(this PlanktonXYZ vector) + { + return new Point3d(vector.X, vector.Y, vector.Z); + } + + /// + /// Creates a Rhino Vector3f from a Plankton vector. + /// + /// A Plankton vector. + /// A Vector3f with the same XYZ components as the vector. + public static Vector3f ToVector3f(this PlanktonXYZ vector) + { + return new Vector3f(vector.X, vector.Y, vector.Z); + } + + /// + /// Sets or adds a vertex to the Vertex List. + /// If [index] is less than [Count], the existing vertex at [index] will be modified. + /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. + /// If [index] is larger than [Count], the function will return false. + /// + /// Index of vertex to set. + /// Vertex location. + /// true on success, false on failure. + public static bool SetVertex(this PlanktonVertexList vertexList, int index, Point3f vertex) + { + return vertexList.SetVertex(index, vertex.X, vertex.Y, vertex.Z); + } + + /// + /// Sets or adds a vertex to the Vertex List. + /// If [index] is less than [Count], the existing vertex at [index] will be modified. + /// If [index] equals [Count], a new vertex is appended to the end of the vertex list. + /// If [index] is larger than [Count], the function will return false. + /// + /// Index of vertex to set. + /// Vertex location. + /// true on success, false on failure. + public static bool SetVertex(this PlanktonVertexList vertexList, int index, Point3d vertex) + { + return vertexList.SetVertex(index, vertex.X, vertex.Y, vertex.Z); + } + + // !!! + /// + /// Moves a vertex by a vector. + /// + /// Index of vertex to move. + /// Vector to move by. + /// true on success, false on failure. + public static bool MoveVertex(this PlanktonVertexList vertexList, int index, Vector3d vector) + { + return vertexList.SetVertex(index, vertexList[index].X + vector.X, vertexList[index].Y + vector.Y, vertexList[index].Z + vector.Z); + } + + /// + /// Adds a new vertex to the end of the Vertex list. + /// + /// Location of new vertex. + /// The index of the newly added vertex. + public static int Add(this PlanktonVertexList vertexList, Point3f vertex) + { + return vertexList.Add(vertex.X, vertex.Y, vertex.Z); + } + + /// + /// Adds a new vertex to the end of the Vertex list. + /// + /// Location of new vertex. + /// The index of the newly added vertex. + public static int Add(this PlanktonVertexList vertexList, Point3d vertex) + { + return vertexList.Add(vertex.X, vertex.Y, vertex.Z); + } + + /// + /// Gets positions of vertices + /// + /// A list of Point3d + /// A Plankton mesh. 
+ public static IEnumerable GetPositions(this PlanktonMesh source) + { + return Enumerable.Range(0, source.Vertices.Count).Select(i => source.Vertices[i].ToPoint3d()); + } + + #region by dyliu + + /// + /// Gets area of a planar quad + /// + /// + /// + public static double QuadArea(Surface srf) + { + double area = 0; + double width; + double height; + + if (srf.GetSurfaceSize(out width, out height)) + { + area = width * height; + } + + return area; + } + + /// + /// Gets the area of a triangle + /// + /// + /// + /// + /// + public static double TriangleArea(Point3d A, Point3d B, Point3d C) + { + double area; + return area = Math.Abs((A.X * (B.Y - C.Y) + B.X * (A.Y - C.Y) + C.X * (A.Y - B.Y)) / 2); + } + + /// + /// Constructs a rhino mesh from a list of srfs + /// + /// + /// + public static Mesh SrfToRhinoMesh(List srfs) + { + Mesh msh = new Mesh(); + + int vertexCounter = 0; + foreach (Surface srf in srfs) + { + srf.SetDomain(0, new Interval(0, 1)); + srf.SetDomain(1, new Interval(0, 1)); + + Point3d cornerA = srf.PointAt(0, 0); + Point3d cornerB = srf.PointAt(0, 1); + Point3d cornerC = srf.PointAt(1, 0); + Point3d cornerD = srf.PointAt(1, 1); + + if (RhinoSupport.QuadArea(srf) == RhinoSupport.TriangleArea(cornerA, cornerB, cornerC)) // triangle mesh face + { + msh.Vertices.Add(cornerA); + msh.Vertices.Add(cornerB); + msh.Vertices.Add(cornerC); + msh.Faces.AddFace(vertexCounter, vertexCounter + 1, vertexCounter + 2); + vertexCounter += 3; + } + + else // quad mesh face + { + msh.Vertices.Add(cornerA); + msh.Vertices.Add(cornerB); + msh.Vertices.Add(cornerD); + msh.Vertices.Add(cornerC); + msh.Faces.AddFace(vertexCounter, vertexCounter + 1, vertexCounter + 2, vertexCounter + 3); + vertexCounter += 4; + } + } + + return msh; + } + + /// + /// get the boundary edges as a list of lines + /// + /// + /// + public static List GetBoundaryEdges(PlanktonMesh pmsh) + { + List bEdges = new List(); + + List nakedEdgeIndex = pmsh.Halfedges.Where(o => o.AdjacentFace == -1).Select(o => o.Index).ToList(); + + foreach (int i in nakedEdgeIndex) + { + int[] ends = pmsh.Halfedges.GetVertices(i); + + Point3d p1 = pmsh.Vertices[ends.First()].ToPoint3d(); + Point3d p2 = pmsh.Vertices[ends.Last()].ToPoint3d(); + bEdges.Add(new Line(p1, p2)); + } + + return bEdges; + } + + /// + /// + /// + /// + /// + public static List GetInnerEdges(PlanktonMesh pmsh) + { + List innerEdges = new List(); + + // when a halfedge.adjacentFace = -1 means it's a naked halfedge + // when a halfedge and its pair are not naked, it's an inner edge + List innerEdgeIDs = pmsh.Halfedges.Where(o => o.AdjacentFace != -1 && pmsh.Halfedges[ pmsh.Halfedges.GetPairHalfedge(o.Index)].AdjacentFace != -1 ).Select(o => o.Index).ToList(); + + // construct lines + foreach (int i in innerEdgeIDs) + { + int[] ends = pmsh.Halfedges.GetVertices(i); + Point3d p1 = pmsh.Vertices[ends.First()].ToPoint3d(); + Point3d p2 = pmsh.Vertices[ends.Last()].ToPoint3d(); + innerEdges.Add(new Line(p1, p2)); + } + + return innerEdges; + } + + /// + /// get boundary vertices as a list + /// + /// + /// + public static List GetBoundaryVertices(PlanktonMesh pmsh) + { + List bVerticesID = new List(); + List nakedEdgeIndex = pmsh.Halfedges.Where(o => o.AdjacentFace == -1).Select(o => o.Index).ToList(); + + foreach (int i in nakedEdgeIndex) + { + int[] ends = pmsh.Halfedges.GetVertices(i); + bVerticesID.AddRange(ends); + } + + bVerticesID.Distinct().ToList().Sort(); + + List bVertices = new List(); + foreach (int i in bVerticesID) + bVertices.Add(pmsh.Vertices[i]); + + 
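// --- Editor's note: illustrative sketch, not part of this commit ---
// TriangleArea above evaluates a shoelace-style expression on X and Y only (note that the
// standard shoelace form uses B.X*(C.Y - A.Y) for the middle term), and SrfToRhinoMesh
// compares its result to QuadArea with ==. For faces that are not parallel to the world XY
// plane, and to guard against round-off, a 3D-safe variant is the cross-product form below,
// compared with a tolerance. Hypothetical helper names; assumes the usings already present
// in RhinoSupport.cs.
static double TriangleArea3d(Point3d a, Point3d b, Point3d c)
{
    // half the magnitude of the cross product of two edge vectors
    return 0.5 * Vector3d.CrossProduct(b - a, c - a).Length;
}

static bool IsTriangleFace(Surface srf, Point3d a, Point3d b, Point3d c, double tol = 1e-9)
{
    return Math.Abs(RhinoSupport.QuadArea(srf) - TriangleArea3d(a, b, c)) < tol;
}
// --- end of editor's sketch ---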
return bVertices.Select(o => o.ToPoint3d()).ToList(); + } + + /// + /// get the inner/constraint vertices + /// + /// + /// + public static List GetConstraintVertices(PlanktonMesh pmsh) + { + + List bVerticesID = new List(); + List nakedEdgeIndex = pmsh.Halfedges.Where(o => o.AdjacentFace == -1).Select(o => o.Index).ToList(); + + foreach (int i in nakedEdgeIndex) + { + int[] ends = pmsh.Halfedges.GetVertices(i); + bVerticesID.AddRange(ends); + } + + bVerticesID.Distinct().ToList().Sort(); + + List bVertices = new List(); + foreach (int i in bVerticesID) + bVertices.Add(pmsh.Vertices[i]); + + List cVertices = pmsh.Vertices.ToList().Except(bVertices).ToList(); + return cVertices.Select(o => o.ToPoint3d()).ToList(); + } + + /// + /// get the indices of inner vertices of a pmesh + /// + /// + /// + public static List GetConstraintVertexIndices(PlanktonMesh pmsh) + { + List bVerticesID = new List(); + List nakedEdgeIndex = pmsh.Halfedges.Where(o => o.AdjacentFace == -1).Select(o => o.Index).ToList(); + + foreach (int i in nakedEdgeIndex) + { + int[] ends = pmsh.Halfedges.GetVertices(i); + bVerticesID.AddRange(ends); + } + + bVerticesID.Distinct().ToList().Sort(); + + List bVertices = new List(); + foreach (int i in bVerticesID) + bVertices.Add(pmsh.Vertices[i]); + + List cVertices = pmsh.Vertices.ToList().Except(bVertices).ToList(); + return cVertices.Select(o => o.Index).ToList(); + } + + /// + /// get the neighbour edges of a vertex + /// + /// + /// + /// + public static List NeighbourVertexEdges(PlanktonMesh pmsh, int vIndex) + { + // index + List neighborEdgesIndices = + pmsh.Halfedges.GetVertexCirculator( + pmsh.Vertices[vIndex].OutgoingHalfedge) + .ToList(); + + // plankton edges + List neighborPEdges = + pmsh.Halfedges.ToList() + .Where(o => neighborEdgesIndices.Contains(o.Index)) + .ToList(); + + // sort counterclockwise + List sortedNeighborPEdges = new List(); + + sortedNeighborPEdges = NeighbourSortingHelper(pmsh, vIndex, neighborPEdges); + + return sortedNeighborPEdges; + } + + ///!!! 
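// Illustrative usage sketch -- not from the patch itself. It assumes the generic
// arguments (List<PlanktonHalfedge>, List<double>) that the diff text does not show,
// and relies on the file's existing using directives.
public static List<double> FoldAnglesAroundVertex(PlanktonMesh pmsh, int vIndex)
{
    // neighbouring halfedges, sorted counterclockwise around the vertex normal
    List<PlanktonHalfedge> fan = RhinoSupport.NeighbourVertexEdges(pmsh, vIndex);

    // angles between consecutive edges of the fan (roughly 2*pi in total on a flat patch)
    List<double> sectorAngles = RhinoSupport.GetSectorAngles(pmsh, vIndex, fan);

    // dihedral fold angle across each edge of the fan
    List<double> foldAngles = RhinoSupport.GetFoldAngles(pmsh, vIndex, fan);

    return foldAngles;
}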
+ /// + /// Sort the neighbour edges in a counterclockwise order + /// + /// + /// + /// + /// + public static List NeighbourSortingHelper(PlanktonMesh pmsh, int vIndex, List neighbourPEdges) + { + // halfedge to line + List neighbourLines = new List(); + foreach (var e in neighbourPEdges) + { + neighbourLines.Add(RhinoSupport.HalfEdgeToLine(pmsh, e)); + } + + // construct ref plane + Point3d origin = pmsh.Vertices[vIndex].ToPoint3d(); + Vector3d v = pmsh.Vertices.GetNormal(vIndex).ToVector3f(); + Plane refPlane = new Plane(origin, v); + + // project the other end point of neighbour edges to the plane + neighbourLines.ForEach(o => o.Transform(Transform.PlanarProjection(refPlane))); + + // look for the other end of the line other than the center vertex + for (int i = 0; i < neighbourLines.Count(); i++) + if (neighbourLines[i].PointAt(1).DistanceTo(origin) < neighbourLines[i].Length / 10000) { neighbourLines[i].Flip(); } + + Vector3d refPlaneX = refPlane.XAxis; + Vector3d refPlaneY = refPlane.YAxis; + List unitV = neighbourLines.Select(o => o.UnitTangent).ToList(); + List anglesToX = unitV.Select(o => Vector3d.VectorAngle(refPlaneX, o)).ToList(); + List anglesToY = unitV.Select(o => Vector3d.VectorAngle(refPlaneY, o)).ToList(); + + for (int i = 0; i < neighbourPEdges.Count(); i++) + { + neighbourPEdges[i].angleToX = anglesToX[i]; + neighbourPEdges[i].angleToY = anglesToY[i]; + } + + // sort by angles to x axis + List G1 = neighbourPEdges.Where(o => o.angleToX <= Math.PI / 2 && o.angleToY <= Math.PI / 2).ToList(); + List G2 = neighbourPEdges.Where(o => o.angleToX > Math.PI / 2 && o.angleToY < Math.PI / 2).ToList(); + List G3 = neighbourPEdges.Where(o => o.angleToX >= Math.PI / 2 && o.angleToY >= Math.PI / 2).ToList(); + List G4 = neighbourPEdges.Where(o => o.angleToX < Math.PI / 2 && o.angleToY > Math.PI / 2).ToList(); + G1 = G1.OrderBy(o => o.angleToX).ToList(); + G2 = G2.OrderBy(o => o.angleToX).ToList(); + G3 = G3.OrderByDescending(o => o.angleToX).ToList(); + G4 = G4.OrderByDescending(o => o.angleToX).ToList(); + + return G1.Concat(G2).Concat(G3).Concat(G4) + .ToList(); + } + + /// + /// + /// + /// + /// + /// + /// + public static List EdgeUnitVector(PlanktonMesh pmsh, int vIndex, List pEdges) + { + List vectors = new List(); + List lines = pEdges.Select(o => RhinoSupport.HalfEdgeToLine(pmsh, o)).ToList(); + Point3d center = pmsh.Vertices[vIndex].ToPoint3d(); + + for (int i = 0; i < lines.Count(); i++) + if (lines[i].PointAt(1).DistanceTo(center) < lines[i].Length / 10000) { lines[i].Flip(); } // flip the lines so that they start from center, pointing outwards + + vectors = lines.Select(o => o.UnitTangent).ToList(); + return vectors; + } + + /// + /// get the sector angles of a inner vertex + /// + /// + /// + /// + /// + public static List GetSectorAngles(PlanktonMesh pmsh, int vIndex, List pEdges) + { + List sectorAngles = new List(); + List unitVecters = RhinoSupport.EdgeUnitVector(pmsh, vIndex, pEdges); + + for (int i = 0; i < unitVecters.Count(); i++) + { + if (i != unitVecters.Count() - 1) { sectorAngles.Add(Vector3d.VectorAngle(unitVecters[i], unitVecters[i + 1]));} // not the last one + else if (i == unitVecters.Count() - 1) { sectorAngles.Add(Vector3d.VectorAngle(unitVecters[i], unitVecters[0]));} // last one in loop + + } + return sectorAngles; + } + + /// + /// get the fold angles of an inner vertex + /// + /// + /// + /// + /// + public static List GetFoldAngles(PlanktonMesh pmsh, int vIndex, List pEdges) + { + List foldAngles = new List(); + + for (int i = 0; i < 
pEdges.Count(); i++)
+            {
+                PlanktonHalfedge e1 = pEdges[i];
+                PlanktonHalfedge e2 = pmsh.Halfedges[pmsh.Halfedges.GetPairHalfedge(pEdges[i].Index)];
+
+                // the two faces adjacent to this edge
+                int f1Index = pmsh.Faces[e1.AdjacentFace].Index;
+                int f2Index = pmsh.Faces[e2.AdjacentFace].Index;
+
+                // build a plane on the first three corners of each face (face polylines computed once per edge)
+                Polyline[] facePolys = RhinoSupport.ToPolylines(pmsh);
+                Plane pln1 = new Plane(facePolys[f1Index][0], facePolys[f1Index][1], facePolys[f1Index][2]);
+                Plane pln2 = new Plane(facePolys[f2Index][0], facePolys[f2Index][1], facePolys[f2Index][2]);
+
+                foldAngles.Add(Math.PI - Vector3d.VectorAngle(pln1.Normal, pln2.Normal)); // * pEdges[i].MV
+            }
+            return foldAngles;
+        }
+
+        /// <summary>
+        /// Given a Plankton mesh and a halfedge, returns that edge as a line.
+        /// </summary>
+        /// <param name="pmsh">A Plankton mesh.</param>
+        /// <param name="e">The halfedge.</param>
+        /// <returns>A line from the halfedge's start vertex to the start vertex of its pair.</returns>
+        public static Line HalfEdgeToLine(PlanktonMesh pmsh, PlanktonHalfedge e)
+        {
+            Point3d p1 = pmsh.Vertices[e.StartVertex].ToPoint3d();
+            Point3d p2 = pmsh.Vertices[pmsh.Halfedges[pmsh.Halfedges.GetPairHalfedge(e.Index)].StartVertex].ToPoint3d();
+
+            return new Line(p1, p2);
+        }
+
+        public static Line HalfEdgeToLine(PlanktonMesh pmsh, int e)
+        {
+            Point3d p1 = pmsh.Vertices[pmsh.Halfedges[e].StartVertex].ToPoint3d();
+            Point3d p2 = pmsh.Vertices[pmsh.Halfedges[pmsh.Halfedges.GetPairHalfedge(e)].StartVertex].ToPoint3d();
+
+            return new Line(p1, p2);
+        }
+
+        /// <summary>
+        /// Determines whether an edge is a mountain or a valley fold.
+        /// Returns 1 for mountain, -1 for valley and 0 for flat or naked edges.
+        /// </summary>
+        /// <param name="pmsh">A Plankton mesh.</param>
+        /// <param name="eIndex">Index of the halfedge to test.</param>
+        /// <returns>1 (mountain), -1 (valley) or 0.</returns>
+        public static int MVDetermination(PlanktonMesh pmsh, int eIndex)
+        {
+            Line l1_up = new Line();
+            Line l1_down = new Line();
+            Line l2_up = new Line();
+            Line l2_down = new Line();
+
+            // face1 and face2 are the faces adjacent to the edge
+            int face1 = pmsh.Halfedges[eIndex].AdjacentFace;
+            int face2 = pmsh.Halfedges[pmsh.Halfedges.GetPairHalfedge(eIndex)].AdjacentFace;
+
+            if (face1 == -1 || face2 == -1)
+            {
+                return 0; // this is a naked edge
+            }
+
+            l1_up = GetFaceNormal(pmsh, face1).First();
+            l1_down = GetFaceNormal(pmsh, face1).Last();
+            l2_up = GetFaceNormal(pmsh, face2).First(); // face2 == -1 (naked edge) is already excluded above
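+            // How the comparison below works: GetFaceNormal returns two unit rays from each
+            // face centre, "up" first and "down" second. If the tips of the two "up" rays are
+            // closer together than the tips of the two "down" rays, the faces fold towards the
+            // normal side and the shared edge is reported as a mountain (+1); the reverse
+            // ordering gives a valley (-1); equal distances give 0.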
+            l2_down = GetFaceNormal(pmsh, face2).Last();
+
+            if (l1_up.PointAt(1).DistanceTo(l2_up.PointAt(1)) <
+                l1_down.PointAt(1).DistanceTo(l2_down.PointAt(1)))
+            {
+                return 1;
+            }
+            else if (l1_up.PointAt(1).DistanceTo(l2_up.PointAt(1)) >
+                     l1_down.PointAt(1).DistanceTo(l2_down.PointAt(1)))
+                return -1;
+            else
+                return 0;
+        }
+
+        /// <summary>
+        /// Gets the two unit normal vectors of a face (both directions) as two lines.
+        /// </summary>
+        /// <param name="pmsh">A Plankton mesh.</param>
+        /// <param name="fIndex">Index of the face.</param>
+        /// <returns>Two unit-length lines starting at the face centre: the first pointing "up", the second "down".</returns>
+        public static List<Line> GetFaceNormal(PlanktonMesh pmsh, int fIndex)
+        {
+            //if (fIndex == -1) fIndex = 1;
+
+            // get the vertices of the face
+            List<Point3d> pts = RhinoSupport.ToPolylines(pmsh)[fIndex].ToList();
+
+            Point3d center = pmsh.Faces.GetFaceCenter(fIndex).ToPoint3d();
+            Vector3d v = Vector3d.CrossProduct(new Line(pts[0], pts[1]).UnitTangent, new Line(pts[1], pts[2]).UnitTangent);
+
+            List<Line> ls = new List<Line>();
+            ls.Add(new Line(center, v));
+            ls.Add(new Line(center, -v));
+
+            // lines start from the centre of the face, pointing up (first item) and down (second item)
+            return ls; // unit length
+        }
+
+        #region subdivision
+
+        /// <summary>
+        /// Checks whether the Plankton mesh is fine enough, i.e. whether every fix point
+        /// already has its own distinct mesh vertex within the given tolerance.
+        /// </summary>
+        /// <param name="p">A Plankton mesh.</param>
+        /// <param name="fixPts">The fix points to match.</param>
+        /// <param name="tolerance">Matching tolerance.</param>
+        /// <param name="vertexIdsToBeMoved">On success, the indices of the matched vertices; otherwise null.</param>
+        /// <returns>true if every fix point has a distinct nearby vertex, false otherwise.</returns>
+        public static bool CheckFixPointVertex(PlanktonMesh p, List<Point3d> fixPts, double tolerance, out List<int> vertexIdsToBeMoved)
+        {
+            bool fineEnough = false;
+
+            // convert all the vertices of the Plankton mesh into a list of points
+            List<Point3d> meshVertices = p.Vertices.ToList().Select(o => RhinoSupport.ToPoint3d(o)).ToList();
+
+            // check whether each input point finds one unique vertex in the existing mesh within the tolerance distance
+            List<int> closeVertexIDs = new List<int>();
+
+            for (int i = 0; i < fixPts.Count; i++)
+            {
+                double[] distances =
+                    meshVertices.Select(o => o.DistanceTo(fixPts[i])).ToArray();
+
+                if (distances.Min() <= tolerance)
+                {
+                    int closeVertexID = Array.IndexOf(distances, distances.Min());
+                    closeVertexIDs.Add(closeVertexID);
+                }
+            }
+
+            // if so, output these IDs and return true; otherwise output null and return false
+            if (closeVertexIDs.Count == fixPts.Count && closeVertexIDs.Distinct().Count() == closeVertexIDs.Count())
+            {
+                fineEnough = true;
+                vertexIdsToBeMoved = closeVertexIDs;
+            }
+            else
+                vertexIdsToBeMoved = null;
+
+            return fineEnough;
+        }
+
+        /// <summary>
+        /// Checks whether the Plankton mesh is fine enough (an R-tree could be used to speed this up).
+        /// </summary>
+        /// <param name="p">A Plankton mesh.</param>
+        /// <param name="fixPts">The fix points to match.</param>
+        /// <param name="tolerance">Matching tolerance.</param>
+        /// <returns>true if every fix point has a distinct nearby vertex, false otherwise.</returns>
+        public static bool CheckFixPointVertex(PlanktonMesh p, List<Point3d> fixPts, double tolerance)
+        {
+            bool fineEnough = false;
+
+            // convert all the vertices of the Plankton mesh into a list of points
+            List<Point3d> meshVertices = p.Vertices.ToList().Select(o => RhinoSupport.ToPoint3d(o)).ToList();
+
+            // check whether each input point finds one unique vertex in the existing mesh within the tolerance distance
+            List<int> closeVertexIDs = new List<int>();
+
+            for (int i = 0; i < fixPts.Count; i++)
+            {
+                double[] distances =
+                    meshVertices.Select(o => o.DistanceTo(fixPts[i])).ToArray();
+
+                if (distances.Min() <= tolerance)
+                {
+                    int closeVertexID = Array.IndexOf(distances, distances.Min());
+                    closeVertexIDs.Add(closeVertexID);
+                }
+            }
+
+            // fine enough when every fix point found its own distinct vertex
+            if (closeVertexIDs.Count == fixPts.Count && closeVertexIDs.Distinct().Count() == closeVertexIDs.Count())
+            {
+                fineEnough = true;
+            }
+
+            return fineEnough;
+        }
+
+        /// Working!!!!!!
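// Illustrative sketch -- not from the patch itself. One plausible way to combine
// CheckFixPointVertex, QuadSubdivide and MoveVertices: refine until every fix point
// has its own nearby vertex, then snap those vertices onto the fix points. The loop,
// the name RefineToFixPoints and the maxIterations cap are assumptions, as are the
// generic arguments (List<Point3d>, List<int>) not shown in the diff text.
public static PlanktonMesh RefineToFixPoints(PlanktonMesh p, List<Point3d> fixPts, double tolerance, int maxIterations)
{
    List<int> vertexIds = null;
    int iteration = 0;

    // subdivide every quad until each fix point can claim a distinct nearby vertex
    while (!RhinoSupport.CheckFixPointVertex(p, fixPts, tolerance, out vertexIds) && iteration++ < maxIterations)
    {
        p = RhinoSupport.QuadSubdivide(p);
    }

    // snap the matched vertices onto their target points
    if (vertexIds != null)
    {
        RhinoSupport.MoveVertices(p, fixPts, vertexIds);
    }

    return p;
}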
+ /// + /// subdivide each quad face into four smaller quad faces + /// + /// + public static PlanktonMesh QuadSubdivide(PlanktonMesh P, List faceIDs) + { + PlanktonMesh newPmsh = new PlanktonMesh(); + // adopt all old vertices + newPmsh.Vertices.AddVertices(P.Vertices.ToList().Select(o => RhinoSupport.ToPlanktonXYZ(o)).ToList()); + + // prepare new face vertices ids + List> divideFaceIDs = new List>(); + int count = P.Vertices.Count; + + for (int i = 0; i < faceIDs.Count; i++) + { + List iFaceIDs = P.Faces.GetFaceVertices(faceIDs[i]).ToList(); + iFaceIDs.AddRange(new List { count + i*5 , count+ i * 5 + 1, count+i * 5 + 2, count+i * 5 + 3, count+i * 5 + 4 }); + //iFaceIDs.AddRange(new List { count, count + 1, count + 2, count + 3, count + 4 }); + + divideFaceIDs.Add(iFaceIDs); + } + + // append new vertex in new mesh + List> newVertices = new List>(); + + for (int j = 0; j < faceIDs.Count; j++) + { + // append center point + Point3d jCenter = P.Faces.GetFaceCenter(faceIDs[j]).ToPoint3d(); + // append middle points of bounding halfedges of a face + List midPts = + P.Faces.GetHalfedges(faceIDs[j]).ToList().Select(o => RhinoSupport.HalfEdgeToLine(P, o).PointAt(0.5)).ToList(); + midPts.Add(jCenter); + + // 5 new vertices in midPts and add to big list. + newVertices.Add(midPts); + } + + // append new vertices to the mesh structure, 5 * faceToDivide vertices are appended. They have duplicate. + for(int l = 0; l < newVertices.Count; l++) + { + newPmsh.Vertices.AddVertices(newVertices[l].Select(o => RhinoSupport.ToPlanktonXYZ(o)).ToList()); + } + + // add all the subdivided faces in the new mesh now + for (int p = 0; p < faceIDs.Count; p++) + { + List pFaceIDs = divideFaceIDs[p]; + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[7], pFaceIDs[0], pFaceIDs[4]); + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[4], pFaceIDs[1], pFaceIDs[5]); + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[5], pFaceIDs[2], pFaceIDs[6]); + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[6], pFaceIDs[3], pFaceIDs[7]); + } + + // add all the unchanged faces to the new mesh now + for (int q = 0; q < P.Faces.Count; q++) + { + if (faceIDs.Any(o => o == q)) // if yes, this is a target face to subdivide and skip it + ; + else + { + newPmsh.Faces.AddFace(P.Faces.GetFaceVertices(q)); + } + } + + newPmsh.Faces.AssignFaceIndex(); + newPmsh.Vertices.AssignVertexIndex(); + newPmsh.Halfedges.AssignHalfEdgeIndex(); + + return newPmsh; + + } + + /// Working!!!!!! + /// + /// subdivide all faces + /// + /// + /// + public static PlanktonMesh QuadSubdivide(PlanktonMesh P) + { + PlanktonMesh newPmsh = new PlanktonMesh(); + // adopt all old vertices + newPmsh.Vertices.AddVertices(P.Vertices.ToList().Select(o => RhinoSupport.ToPlanktonXYZ(o)).ToList()); + + // prepare new face vertices ids + List> divideFaceIDs = new List>(); // + int vertexCount = P.Vertices.Count; + int faceCount = P.Faces.Count; + List faceIDs = Enumerable.Range(0, faceCount).ToList(); // a sequence of int, {0, 1, 2, ... 
, count - 1} + + for (int i = 0; i < faceCount; i++) + { + // get face vertex ids of the old mesh + List iFaceIDs = P.Faces.GetFaceVertices(faceIDs[i]).ToList(); + iFaceIDs.AddRange(new List { vertexCount + i * 5, vertexCount + i * 5 + 1, vertexCount + i * 5 + 2, vertexCount + i * 5 + 3, vertexCount + i * 5 + 4 }); + //iFaceIDs.AddRange(new List { count, count + 1, count + 2, count + 3, count + 4 }); + + divideFaceIDs.Add(iFaceIDs); + } + + // append new vertex in new mesh + List> newVertices = new List>(); + + for (int j = 0; j < faceIDs.Count; j++) + { + // append center point + Point3d jCenter = P.Faces.GetFaceCenter(faceIDs[j]).ToPoint3d(); + // append middle points of bounding halfedges of a face + List midPts = + P.Faces.GetHalfedges(faceIDs[j]).ToList().Select(o => RhinoSupport.HalfEdgeToLine(P, o).PointAt(0.5)).ToList(); + midPts.Add(jCenter); + + // 5 new vertices in midPts and add to big list. + newVertices.Add(midPts); + } + + // append new vertices to the mesh structure, 5 * faceToDivide vertices are appended. They have duplicate. + for (int l = 0; l < newVertices.Count; l++) + { + newPmsh.Vertices.AddVertices(newVertices[l].Select(o => RhinoSupport.ToPlanktonXYZ(o)).ToList()); + } + + // add all the subdivided faces in the new mesh now + for (int p = 0; p < faceIDs.Count; p++) + { + List pFaceIDs = divideFaceIDs[p]; + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[7], pFaceIDs[0], pFaceIDs[4]); + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[4], pFaceIDs[1], pFaceIDs[5]); + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[5], pFaceIDs[2], pFaceIDs[6]); + newPmsh.Faces.AddFace(pFaceIDs[8], pFaceIDs[6], pFaceIDs[3], pFaceIDs[7]); + } + + // add all the unchanged faces to the new mesh now + for (int q = 0; q < P.Faces.Count; q++) + { + if (faceIDs.Any(o => o == q)) // if yes, this is a target face to subdivide and skip it + ; + else + { + newPmsh.Faces.AddFace(P.Faces.GetFaceVertices(q)); + } + } + + newPmsh.Faces.AssignFaceIndex(); + newPmsh.Vertices.AssignVertexIndex(); + newPmsh.Halfedges.AssignHalfEdgeIndex(); + + return RhinoSupport.WeldPMesh(newPmsh); + } + + /// Working!!!!!! + /// + /// Plankton mesh will be changed by appending more vertices + /// + /// + /// + /// + /// return 9 integers which will be used to query the vertex when constructing a new mesh + /// + public static PlanktonMesh SubdivideOneQuad(PlanktonMesh P, int i) + { + // copy vertices from old plankton mesh to new plankton mesh + PlanktonMesh dividedPMesh = new PlanktonMesh(); + dividedPMesh.Vertices.AddVertices(P.Vertices.Select(o => RhinoSupport.ToPlanktonXYZ(o)).ToList()); + + // ----------------9 index IDs for face construction-------------------- + List newVertexIDs = new List(); + + // 4 face vertices index of face No. 
i
+            List<int> oldVertexIDs = P.Faces.GetFaceVertices(i).ToList();
+            // 5 more indices for the new vertices of this quad face
+            List<int> appendVertexIDs = new List<int> { dividedPMesh.Vertices.Count, dividedPMesh.Vertices.Count + 1, dividedPMesh.Vertices.Count + 2, dividedPMesh.Vertices.Count + 3, dividedPMesh.Vertices.Count + 4 };
+            newVertexIDs = oldVertexIDs.Concat(appendVertexIDs).ToList();
+
+            // ---------------- append PlanktonVertices ----------------
+
+            // center of this face
+            Point3d centerPt = P.Faces.GetFaceCenter(i).ToPoint3d();
+
+            // middle points of the bounding halfedges of the face
+            List<Point3d> midPts =
+                P.Faces.GetHalfedges(i).ToList().Select(o => RhinoSupport.HalfEdgeToLine(P, o).PointAt(0.5)).ToList();
+            midPts.Add(centerPt);
+
+            dividedPMesh.Vertices.AddVertices(midPts.Select(o => RhinoSupport.ToPlanktonXYZ(o)).ToList());
+
+            // ---------------- construct new faces ----------------
+            // index
+            // 0 --- 4 --- 1
+            // |     |     |
+            // 7 --- 8 --- 5
+            // |     |     |
+            // 3 --- 6 --- 2
+            dividedPMesh.Faces.AddFace(newVertexIDs[8], newVertexIDs[7], newVertexIDs[0], newVertexIDs[4]);
+            dividedPMesh.Faces.AddFace(newVertexIDs[8], newVertexIDs[4], newVertexIDs[1], newVertexIDs[5]);
+            dividedPMesh.Faces.AddFace(newVertexIDs[8], newVertexIDs[5], newVertexIDs[2], newVertexIDs[6]);
+            dividedPMesh.Faces.AddFace(newVertexIDs[8], newVertexIDs[6], newVertexIDs[3], newVertexIDs[7]);
+
+            for (int j = 0; j < P.Faces.Count; j++)
+            {
+                if (j != i) // append all faces other than face i, which has just been divided
+                {
+                    List<int> jFaceVertices = P.Faces.GetFaceVertices(j).ToList();
+                    dividedPMesh.Faces.AddFace(jFaceVertices);
+                }
+            }
+
+            dividedPMesh.Faces.AssignFaceIndex();
+            dividedPMesh.Vertices.AssignVertexIndex();
+            dividedPMesh.Halfedges.AssignHalfEdgeIndex();
+
+            Mesh tmpRhinoMesh = RhinoSupport.ToRhinoMesh(dividedPMesh);
+            tmpRhinoMesh.Weld(0.1);
+            dividedPMesh = RhinoSupport.ToPlanktonMesh(tmpRhinoMesh);
+
+            return dividedPMesh;
+        }
+
+        /// <summary>
+        /// Selects the quad faces closest to the fix points and subdivides them.
+        /// </summary>
+        /// <param name="P">A Plankton mesh.</param>
+        /// <param name="fixPts">The fix points.</param>
+        /// <returns>The subdivided mesh.</returns>
+        public static PlanktonMesh SelectedQuadSubdivide(PlanktonMesh P, List<Point3d> fixPts)
+        {
+            List<Point3d> centers = P.Faces.ToList().Select(o => RhinoSupport.ToPoint3d(P.Faces.GetFaceCenter(o.Index))).ToList();
+
+            // find which face ids to divide
+            List<int> ToDivideFaceIDs = new List<int>();
+
+            for (int i = 0; i < fixPts.Count; i++)
+            {
+                List<double> distances = centers.Select(o => o.DistanceTo(fixPts[i])).ToList();
+                int closePtID = Array.IndexOf(distances.ToArray(), distances.Min());
+                ToDivideFaceIDs.Add(closePtID);
+            }
+
+            // QuadSubdivide builds a new mesh, so hand it back to the caller
+            return QuadSubdivide(P, ToDivideFaceIDs);
+        }
+
+        /// <summary>
+        /// Moves the given vertices onto the input fix points.
+        /// </summary>
+        /// <param name="P">A Plankton mesh.</param>
+        /// <param name="targetPts">Target locations.</param>
+        /// <param name="vertexToMove">Indices of the vertices to move.</param>
+        public static void MoveVertices(PlanktonMesh P, List<Point3d> targetPts, List<int> vertexToMove)
+        {
+            if (targetPts == null || vertexToMove == null)
+                return;
+            if (targetPts.Count != vertexToMove.Count) // counts differ; do nothing
+                return;
+            else
+            {
+                // move the vertices to their targets
+                for (int i = 0; i < targetPts.Count; i++)
+                    P.Vertices.SetVertex(vertexToMove[i], targetPts[i]);
+            }
+        }
+
+        /// <summary>
+        /// Uses an R-tree search to find the closest vertex to each target point, then moves it there.
+        /// </summary>
+        /// <param name="P">A Plankton mesh.</param>
+        /// <param name="targetPts">Target locations.</param>
+        public static void MoveVertices(PlanktonMesh P, List<Point3d> targetPts)
+        {
+            // all vertices as Point3d
+            List<Point3d> meshVertices = P.Vertices.ToList().Select(o => RhinoSupport.ToPoint3d(o)).ToList();
+
+            // find the closest vertex to each target point
+            List<int> closeVertexIDs = new List<int>();
+
+            for (int i = 0; i < targetPts.Count; i++)
+            {
+                closeVertexIDs.Add(RhinoSupport.GetClosestPointID(meshVertices, targetPts[i], 0.1));
+            }
+
+            if
(targetPts == null || closeVertexIDs == null) + return; + if (targetPts.Count != closeVertexIDs.Count) // not same amount, don¡¯t do nothing + return; + else + { + // move the vertices to their targets + for (int i = 0; i < targetPts.Count; i++) + P.Vertices.SetVertex(closeVertexIDs[i], targetPts[i]); + } + } + /// + /// weld a pmesh(they are broken after subdivide) + /// + /// + /// + public static PlanktonMesh WeldPMesh(PlanktonMesh P) + { + Mesh M = ToRhinoMesh(P); + M.Weld(0.1); + return RhinoSupport.ToPlanktonMesh(M); + } + + #endregion subdivision + + #region sofi + /// + /// GET closest point from a list using rtree + /// + /// + /// + /// + /// + public static int GetClosestPointID(List pts, Point3d pt, double sphereR) + { + RTree rtree = new RTree(); + for (int i = 0; i < pts.Count; i++) + rtree.Insert(pts[i], i); + List pointIds = new List(); + + EventHandler rtreeCallBack = (object sender, RTreeEventArgs args) => + { + pointIds.Add(args.Id); + }; + + bool findPt = false; + do + { + findPt = rtree.Search(new Sphere(pt, sphereR), rtreeCallBack); + sphereR *= 2; + + } while (findPt == false || pointIds.Count == 0); // do until close points are found! + + // if only one point is found, good, that 's the closest vertex, return it! + if (pointIds.Count == 1) + { + return pointIds.First(); + } + + else + { + List closePts = new List(); + foreach (int i in pointIds) + closePts.Add(pts[i]); + double[] distances = closePts.Select(o => o.DistanceTo(pt)).ToArray(); + int closeVertexID = pointIds[Array.IndexOf(distances, distances.Min())]; + + return closeVertexID; + } + } + + /// + /// offset curve + /// + /// + /// + /// + /// + public static Curve OffsetFaceCurve(PlanktonMesh p, int faceId, double d) + { + Curve offsetCrv = null; + Curve edgeCrv = p.ToPolylines().ToList()[faceId].ToNurbsCurve(); + Point3d pt = p.ToPolylines().ToList()[faceId].First(); + Vector3d v = RhinoSupport.GetFaceNormal(p, faceId).First().Direction; + CurveOffsetCornerStyle style = new CurveOffsetCornerStyle(); + offsetCrv = edgeCrv.Offset(new Plane(pt, -v), d, 0.01, style).First(); + return offsetCrv; + + } + + /// + /// offset the boundary of a face as polyline + /// + /// + /// + /// + /// + /// + public static Polyline OffsetFacePolyline(PlanktonMesh p, int faceId, double d, out List corners, out List beforeOffsetCorners) + { + Curve offsetCrv = null; + Polyline polyline = p.ToPolylines().ToList()[faceId]; + Curve edgeCrv = polyline.ToNurbsCurve(); + + List pts = p.ToPolylines().ToList()[faceId].ToList(); + beforeOffsetCorners = pts; + + PolylineCurve polylineCrv = new PolylineCurve(pts); + Point3d pt = pts.First(); + Vector3d v = RhinoSupport.GetFaceNormal(p, faceId).First().Direction; + CurveOffsetCornerStyle style = new CurveOffsetCornerStyle(); + offsetCrv = polylineCrv.Offset(new Plane(pt, -v), d, 0.01, style).First(); + + Polyline offsetPolyline = new Polyline(); + if (offsetCrv.IsPolyline()) + offsetCrv.ToNurbsCurve().TryGetPolyline(out offsetPolyline); + + corners = offsetPolyline.ToList(); + corners.RemoveAt(corners.Count - 1); + + return offsetPolyline; + + } + + public static Point3f Point3dToPoint3f(Point3d p) + { + return new Point3f((float)p.X, (float)p.Y, (float)p.Z); + } + + public static Point3d Point3fToPoint3d(Point3f p) + { + return new Point3d(p.X, p.Y, p.Z); + } + + //public static List> InsideMeshFaceIds(List> corners, PlanktonMesh p) + //{ + // List> meshFaceIds = new List>(); + // List polylines = RhinoSupport.ToPolylines(p).ToList(); + + //} + + public static bool isPointOnCurve(Point3d 
pt, Curve crv, double tolerence) + { + bool online = false; + double t = 0.0; + crv.ClosestPoint(pt, out t); + Point3d closestPt = crv.PointAt(t); + if (closestPt.DistanceTo(pt) < tolerence) + { + online = true; + } + return online; + } + + /// + /// tell if a line in on another line + /// + /// + /// + /// + public static bool isLineOnLine(Line l, Line line) + { + if (RhinoSupport.isPointOnCurve(l.PointAt(0), line.ToNurbsCurve(), 0.01) && RhinoSupport.isPointOnCurve(l.PointAt(1), line.ToNurbsCurve(), 0.01)) + { + return true; + } + + else + { + return false; + } + } + + public static bool isPointOnSurface(Point3d pt, Surface srf, double tolerence) + { + bool online = false; + double u = 0.0; + double v = 0.0; + + srf.ClosestPoint(pt, out u, out v); + Point3d closestPt = srf.PointAt(u , v); + if (closestPt.DistanceTo(pt) < tolerence) + { + online = true; + } + return online; + } + + #endregion + + #region structure fold + + public static double[,] getTranforamtionArray(Line bar, Plane globalCoor) + { + Plane localCoor = new Plane(bar.PointAt(0), bar.UnitTangent); + Vector3d localX = localCoor.XAxis / localCoor.XAxis.Length; + Vector3d localY = localCoor.YAxis / localCoor.YAxis.Length; + Vector3d localZ = localCoor.ZAxis / localCoor.ZAxis.Length; + + Vector3d globalX = globalCoor.XAxis / globalCoor.XAxis.Length; + Vector3d globalY = globalCoor.YAxis / globalCoor.YAxis.Length; + Vector3d globalZ = globalCoor.ZAxis / globalCoor.ZAxis.Length; + + double[,] t = + { + {Vector3d.Multiply(localX, globalX), Vector3d.Multiply(localX, globalY), Vector3d.Multiply(localX, globalZ),0,0,0 }, + {Vector3d.Multiply(localY, globalX), Vector3d.Multiply(localY, globalY), Vector3d.Multiply(localY, globalZ),0,0,0 }, + {Vector3d.Multiply(localZ, globalX), Vector3d.Multiply(localZ, globalY), Vector3d.Multiply(localZ, globalZ),0,0,0 }, + {0,0,0,Vector3d.Multiply(localX, globalX), Vector3d.Multiply(localX, globalY), Vector3d.Multiply(localX, globalZ)}, + {0,0,0,Vector3d.Multiply(localY, globalX), Vector3d.Multiply(localY, globalY), Vector3d.Multiply(localY, globalZ)}, + {0,0,0,Vector3d.Multiply(localZ, globalX), Vector3d.Multiply(localZ, globalY), Vector3d.Multiply(localZ, globalZ)}, + }; + + return t; + } + + + + #endregion + + + + #endregion by dyliu + } +} + diff --git a/src/PlanktonTests/FaceTest.cs b/src/PlanktonTests/FaceTest.cs index f0c47a7..ffb3e0b 100644 --- a/src/PlanktonTests/FaceTest.cs +++ b/src/PlanktonTests/FaceTest.cs @@ -1,362 +1,362 @@ -using System; -using System.Collections.Generic; -using NUnit.Framework; - -namespace Plankton.Test -{ - [TestFixture] - public class FaceTest - { - [Test] - public void CanSplitFace() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create one quadrangular face - pMesh.Faces.AddFace(0, 1, 2, 3); - - // Split face into two triangles - int new_he = pMesh.Faces.SplitFace(0, 4); - - // Returned halfedge should be adjacent to old face (#0) - Assert.AreEqual(0, pMesh.Halfedges[new_he].AdjacentFace); - - // Traverse from returned halfedge to new face - int new_he_pair = pMesh.Halfedges.GetPairHalfedge(new_he); - int new_face = pMesh.Halfedges[new_he_pair].AdjacentFace; - - Assert.AreEqual(1, new_face); - - // Check that both faces are now triangular - Assert.AreEqual(3, pMesh.Faces.GetFaceVertices(0).Length); - Assert.AreEqual(3, pMesh.Faces.GetFaceVertices(1).Length); - - 
// Check the halfedges of each face - Assert.AreEqual(new int[] { 8, 0, 2 }, pMesh.Faces.GetHalfedges(0)); - Assert.AreEqual(new int[] { 9, 4, 6 }, pMesh.Faces.GetHalfedges(1)); - } - - [Test] - public void CannotSplitFaceBadArguments() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create one quadrangular face - pMesh.Faces.AddFace(0, 1, 2, 3); - - // First halfedge is a boundary - Assert.AreEqual(-1, pMesh.Faces.SplitFace(1, 4)); - - // Second halfedge is a boundary - Assert.AreEqual(-1, pMesh.Faces.SplitFace(4, 1)); - - // Same halfedge used for both arguments - Assert.AreEqual(-1, pMesh.Faces.SplitFace(0, 0)); - - // Second halfedge is successor to first - Assert.AreEqual(-1, pMesh.Faces.SplitFace(0, 2)); - - // Second halfedge is predecessor to first - Assert.AreEqual(-1, pMesh.Faces.SplitFace(0, 6)); - } - - [Test] - public void CanMergeFaces() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create two triangular faces - pMesh.Faces.AddFace(0, 1, 2); - pMesh.Faces.AddFace(2, 3, 0); - - // Force merge to update outgoing halfedge of vertex #2 - pMesh.Vertices[2].OutgoingHalfedge = 4; - - // Merge faces - int h_rtn = pMesh.Faces.MergeFaces(4); - - // Check that the correct face was retained - int f = pMesh.Halfedges[h_rtn].AdjacentFace; - Assert.AreEqual(0, f); - - // Check face halfedges - int[] fhs = pMesh.Faces.GetHalfedges(f); - Assert.AreEqual(new int[] { 0, 2, 6, 8 }, fhs); - foreach (int h in fhs) - { - Assert.AreEqual(f, pMesh.Halfedges[h].AdjacentFace); - } - - // Check that outgoing halfedge of vertex #2 was updated correctly - Assert.AreEqual(6, pMesh.Vertices[2].OutgoingHalfedge); - Assert.AreEqual(f, pMesh.Halfedges[6].AdjacentFace); - } - - [Test] - public void CannotMergeFacesBoundary() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create one quadrangular face - pMesh.Faces.AddFace(0, 1, 2, 3); - - Assert.AreEqual(-1, pMesh.Faces.MergeFaces(0)); - } - - [Test] - public void CannotMergeFacesAntenna() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - pMesh.Vertices.Add(0.5, 0.5, 0); // 4 - - // Create two quadrangular faces - pMesh.Faces.AddFace(0, 1, 2, 4); - pMesh.Faces.AddFace(2, 3, 0, 4); - - // Merge should fail (faces are joined by two edges) - Assert.AreEqual(-1, pMesh.Faces.MergeFaces(4)); - } - - [Test] - public void CanSplitMergeInvariant() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create one quadrangular face - pMesh.Faces.AddFace(0, 1, 2, 3); - - int start_he = 0; - - // Split face into two triangles - int new_he = pMesh.Faces.SplitFace(start_he, 4); - - // Merge 
them back again - int old_he = pMesh.Faces.MergeFaces(new_he); - - // We should be back where we started... - Assert.AreEqual(start_he, old_he); - } - - [Test] - public void CanRemoveFace() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - pMesh.Vertices.Add(2, 0, 0); // 4 - pMesh.Vertices.Add(2, 1, 0); // 5 - - // Create two quadrangular faces - pMesh.Faces.AddFace(0, 1, 2, 3); - pMesh.Faces.AddFace(1, 4, 5, 2); - - // Traverse around mesh boundary and count halfedges - int count, he_first, he_current; - count = 0; - he_first = 1; - he_current = he_first; - do - { - count++; - he_current = pMesh.Halfedges[he_current].NextHalfedge; - } - while (he_current != he_first); - - Assert.AreEqual(6, count); - - // Remove the second face - pMesh.Faces.RemoveFace(1); - - // Count again... - count = 0; - he_first = 1; - he_current = he_first; - do - { - count++; - he_current = pMesh.Halfedges[he_current].NextHalfedge; - } - while (he_current != he_first); - - Assert.AreEqual(4, count); - } - - [Test] - public void CanCompact() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create two triangular faces - pMesh.Faces.AddFace(0, 1, 2); - pMesh.Faces.AddFace(2, 3, 0); - - // Merge faces and compact (squashing face #0) - pMesh.Faces.MergeFaces(4); - pMesh.Faces.CompactHelper(); - - // Check some things about the compacted mesh - Assert.AreEqual(1, pMesh.Faces.Count); - Assert.AreEqual(new int[] { 0, 1, 2, 3 }, pMesh.Faces.GetFaceVertices(0)); - } - - [Test] - public void CanTraverseUnusedFace() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Add a single unset face - pMesh.Faces.Add(PlanktonFace.Unset); - - Assert.IsEmpty(pMesh.Faces.GetHalfedges(0)); - } - - [Test] - public void CanAddManyFaces() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create two triangular faces - var faces = new int[][] - { - new int[] { 0, 1, 2 }, - new int[] { 3, 2, 1 } - }; - var retval = pMesh.Faces.AddFaces(faces); - - Assert.AreEqual(new int[] { 0, 1 }, retval); - Assert.AreEqual(2, pMesh.Faces.Count); - Assert.AreEqual(faces[1], pMesh.Faces.GetFaceVertices(1)); - } - - [Test] - public void CanDeleteFaceAndAddFace() - { - /* - - 0 - 4 - 8 - 12 - | 0 | 3 | 6 | - 1 - 5 - 9 - 13 - | 1 | 4 | 7 | - 2 - 6 - 10- 14 - | 2 | 5 | 8 | - 3 - 7 - 11- 15 - - */ - - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create 4x4 grid of vertices - pMesh.Vertices.Add(0, 3, 0); // 0 - pMesh.Vertices.Add(0, 2, 0); // 1 - pMesh.Vertices.Add(0, 1, 0); // 2 - pMesh.Vertices.Add(0, 0, 0); // 3 - pMesh.Vertices.Add(1, 3, 0); // 4 - pMesh.Vertices.Add(1, 2, 0); // 5 - pMesh.Vertices.Add(1, 1, 0); // 6 - pMesh.Vertices.Add(1, 0, 0); // 7 - pMesh.Vertices.Add(2, 3, 0); // 8 - pMesh.Vertices.Add(2, 2, 0); // 9 - pMesh.Vertices.Add(2, 1, 0); // 10 - pMesh.Vertices.Add(2, 0, 0); // 11 - pMesh.Vertices.Add(3, 3, 0); // 12 - pMesh.Vertices.Add(3, 2, 0); // 13 - pMesh.Vertices.Add(3, 1, 0); // 14 - pMesh.Vertices.Add(3, 0, 0); // 15 - - // Create nine 
quadrangular faces - pMesh.Faces.AddFace(0, 1, 5, 4); // 0 - pMesh.Faces.AddFace(1, 2, 6, 5); // 1 - pMesh.Faces.AddFace(2, 3, 7, 6); // 2 - pMesh.Faces.AddFace(4, 5, 9, 8); // 3 - pMesh.Faces.AddFace(5, 6, 10, 9); // 4 - pMesh.Faces.AddFace(6, 7, 11, 10); // 5 - pMesh.Faces.AddFace(8, 9, 13, 12); // 6 - pMesh.Faces.AddFace(9, 10, 14, 13); // 7 - pMesh.Faces.AddFace(10, 11, 15, 14); // 8 - - int id = 4; // center face - - // Get old face info - var faceCirculator = pMesh.Halfedges.GetFaceCirculator(pMesh.Faces[id].FirstHalfedge); - var faceVerts = new List(); - foreach (var i in faceCirculator) - { - faceVerts.Add(pMesh.Halfedges[i].StartVertex); - } - - // All face vertices should be internal (not boundary) - foreach (int i in faceVerts) - { - Assert.IsFalse(pMesh.Vertices.IsBoundary(i)); - } - - // Delete old face - pMesh.Faces.RemoveFace(id); - - // All face vertices should now be on a boundary - foreach (int i in faceVerts) - { - Assert.IsTrue(pMesh.Vertices.IsBoundary(i)); - } - - // pMesh.Compact(); - - // Re-add face - var res = pMesh.Faces.AddFace(faceVerts); - Assert.AreNotEqual(-1, res); - Assert.AreEqual(9, res); // res == 8 if mesh compacted beforehand - } - } -} +using System; +using System.Collections.Generic; +using NUnit.Framework; + +namespace Plankton.Test +{ + [TestFixture] + public class FaceTest + { + [Test] + public void CanSplitFace() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create one quadrangular face + pMesh.Faces.AddFace(0, 1, 2, 3); + + // Split face into two triangles + int new_he = pMesh.Faces.SplitFace(0, 4); + + // Returned halfedge should be adjacent to old face (#0) + Assert.AreEqual(0, pMesh.Halfedges[new_he].AdjacentFace); + + // Traverse from returned halfedge to new face + int new_he_pair = pMesh.Halfedges.GetPairHalfedge(new_he); + int new_face = pMesh.Halfedges[new_he_pair].AdjacentFace; + + Assert.AreEqual(1, new_face); + + // Check that both faces are now triangular + Assert.AreEqual(3, pMesh.Faces.GetFaceVertices(0).Length); + Assert.AreEqual(3, pMesh.Faces.GetFaceVertices(1).Length); + + // Check the halfedges of each face + Assert.AreEqual(new int[] { 8, 0, 2 }, pMesh.Faces.GetHalfedges(0)); + Assert.AreEqual(new int[] { 9, 4, 6 }, pMesh.Faces.GetHalfedges(1)); + } + + [Test] + public void CannotSplitFaceBadArguments() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create one quadrangular face + pMesh.Faces.AddFace(0, 1, 2, 3); + + // First halfedge is a boundary + Assert.AreEqual(-1, pMesh.Faces.SplitFace(1, 4)); + + // Second halfedge is a boundary + Assert.AreEqual(-1, pMesh.Faces.SplitFace(4, 1)); + + // Same halfedge used for both arguments + Assert.AreEqual(-1, pMesh.Faces.SplitFace(0, 0)); + + // Second halfedge is successor to first + Assert.AreEqual(-1, pMesh.Faces.SplitFace(0, 2)); + + // Second halfedge is predecessor to first + Assert.AreEqual(-1, pMesh.Faces.SplitFace(0, 6)); + } + + [Test] + public void CanMergeFaces() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + 
pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create two triangular faces + pMesh.Faces.AddFace(0, 1, 2); + pMesh.Faces.AddFace(2, 3, 0); + + // Force merge to update outgoing halfedge of vertex #2 + pMesh.Vertices[2].OutgoingHalfedge = 4; + + // Merge faces + int h_rtn = pMesh.Faces.MergeFaces(4); + + // Check that the correct face was retained + int f = pMesh.Halfedges[h_rtn].AdjacentFace; + Assert.AreEqual(0, f); + + // Check face halfedges + int[] fhs = pMesh.Faces.GetHalfedges(f); + Assert.AreEqual(new int[] { 0, 2, 6, 8 }, fhs); + foreach (int h in fhs) + { + Assert.AreEqual(f, pMesh.Halfedges[h].AdjacentFace); + } + + // Check that outgoing halfedge of vertex #2 was updated correctly + Assert.AreEqual(6, pMesh.Vertices[2].OutgoingHalfedge); + Assert.AreEqual(f, pMesh.Halfedges[6].AdjacentFace); + } + + [Test] + public void CannotMergeFacesBoundary() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create one quadrangular face + pMesh.Faces.AddFace(0, 1, 2, 3); + + Assert.AreEqual(-1, pMesh.Faces.MergeFaces(0)); + } + + [Test] + public void CannotMergeFacesAntenna() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + pMesh.Vertices.Add(0.5, 0.5, 0); // 4 + + // Create two quadrangular faces + pMesh.Faces.AddFace(0, 1, 2, 4); + pMesh.Faces.AddFace(2, 3, 0, 4); + + // Merge should fail (faces are joined by two edges) + Assert.AreEqual(-1, pMesh.Faces.MergeFaces(4)); + } + + [Test] + public void CanSplitMergeInvariant() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create one quadrangular face + pMesh.Faces.AddFace(0, 1, 2, 3); + + int start_he = 0; + + // Split face into two triangles + int new_he = pMesh.Faces.SplitFace(start_he, 4); + + // Merge them back again + int old_he = pMesh.Faces.MergeFaces(new_he); + + // We should be back where we started... + Assert.AreEqual(start_he, old_he); + } + + [Test] + public void CanRemoveFace() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + pMesh.Vertices.Add(2, 0, 0); // 4 + pMesh.Vertices.Add(2, 1, 0); // 5 + + // Create two quadrangular faces + pMesh.Faces.AddFace(0, 1, 2, 3); + pMesh.Faces.AddFace(1, 4, 5, 2); + + // Traverse around mesh boundary and count halfedges + int count, he_first, he_current; + count = 0; + he_first = 1; + he_current = he_first; + do + { + count++; + he_current = pMesh.Halfedges[he_current].NextHalfedge; + } + while (he_current != he_first); + + Assert.AreEqual(6, count); + + // Remove the second face + pMesh.Faces.RemoveFace(1); + + // Count again... 
+ count = 0; + he_first = 1; + he_current = he_first; + do + { + count++; + he_current = pMesh.Halfedges[he_current].NextHalfedge; + } + while (he_current != he_first); + + Assert.AreEqual(4, count); + } + + [Test] + public void CanCompact() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create two triangular faces + pMesh.Faces.AddFace(0, 1, 2); + pMesh.Faces.AddFace(2, 3, 0); + + // Merge faces and compact (squashing face #0) + pMesh.Faces.MergeFaces(4); + pMesh.Faces.CompactHelper(); + + // Check some things about the compacted mesh + Assert.AreEqual(1, pMesh.Faces.Count); + Assert.AreEqual(new int[] { 0, 1, 2, 3 }, pMesh.Faces.GetFaceVertices(0)); + } + + [Test] + public void CanTraverseUnusedFace() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Add a single unset face + pMesh.Faces.Add(PlanktonFace.Unset); + + Assert.IsEmpty(pMesh.Faces.GetHalfedges(0)); + } + + [Test] + public void CanAddManyFaces() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create two triangular faces + var faces = new int[][] + { + new int[] { 0, 1, 2 }, + new int[] { 3, 2, 1 } + }; + var retval = pMesh.Faces.AddFaces(faces); + + Assert.AreEqual(new int[] { 0, 1 }, retval); + Assert.AreEqual(2, pMesh.Faces.Count); + Assert.AreEqual(faces[1], pMesh.Faces.GetFaceVertices(1)); + } + + [Test] + public void CanDeleteFaceAndAddFace() + { + /* + + 0 - 4 - 8 - 12 + | 0 | 3 | 6 | + 1 - 5 - 9 - 13 + | 1 | 4 | 7 | + 2 - 6 - 10- 14 + | 2 | 5 | 8 | + 3 - 7 - 11- 15 + + */ + + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create 4x4 grid of vertices + pMesh.Vertices.Add(0, 3, 0); // 0 + pMesh.Vertices.Add(0, 2, 0); // 1 + pMesh.Vertices.Add(0, 1, 0); // 2 + pMesh.Vertices.Add(0, 0, 0); // 3 + pMesh.Vertices.Add(1, 3, 0); // 4 + pMesh.Vertices.Add(1, 2, 0); // 5 + pMesh.Vertices.Add(1, 1, 0); // 6 + pMesh.Vertices.Add(1, 0, 0); // 7 + pMesh.Vertices.Add(2, 3, 0); // 8 + pMesh.Vertices.Add(2, 2, 0); // 9 + pMesh.Vertices.Add(2, 1, 0); // 10 + pMesh.Vertices.Add(2, 0, 0); // 11 + pMesh.Vertices.Add(3, 3, 0); // 12 + pMesh.Vertices.Add(3, 2, 0); // 13 + pMesh.Vertices.Add(3, 1, 0); // 14 + pMesh.Vertices.Add(3, 0, 0); // 15 + + // Create nine quadrangular faces + pMesh.Faces.AddFace(0, 1, 5, 4); // 0 + pMesh.Faces.AddFace(1, 2, 6, 5); // 1 + pMesh.Faces.AddFace(2, 3, 7, 6); // 2 + pMesh.Faces.AddFace(4, 5, 9, 8); // 3 + pMesh.Faces.AddFace(5, 6, 10, 9); // 4 + pMesh.Faces.AddFace(6, 7, 11, 10); // 5 + pMesh.Faces.AddFace(8, 9, 13, 12); // 6 + pMesh.Faces.AddFace(9, 10, 14, 13); // 7 + pMesh.Faces.AddFace(10, 11, 15, 14); // 8 + + int id = 4; // center face + + // Get old face info + var faceCirculator = pMesh.Halfedges.GetFaceCirculator(pMesh.Faces[id].FirstHalfedge); + var faceVerts = new List(); + foreach (var i in faceCirculator) + { + faceVerts.Add(pMesh.Halfedges[i].StartVertex); + } + + // All face vertices should be internal (not boundary) + foreach (int i in faceVerts) + { + Assert.IsFalse(pMesh.Vertices.IsBoundary(i)); + } + + // Delete old face + pMesh.Faces.RemoveFace(id); + + // All face vertices should now be on a boundary + foreach (int i in faceVerts) + { + Assert.IsTrue(pMesh.Vertices.IsBoundary(i)); + } 
+ + // pMesh.Compact(); + + // Re-add face + var res = pMesh.Faces.AddFace(faceVerts); + Assert.AreNotEqual(-1, res); + Assert.AreEqual(9, res); // res == 8 if mesh compacted beforehand + } + } +} diff --git a/src/PlanktonTests/HalfedgeTest.cs b/src/PlanktonTests/HalfedgeTest.cs index 8353c32..1d92f3d 100644 --- a/src/PlanktonTests/HalfedgeTest.cs +++ b/src/PlanktonTests/HalfedgeTest.cs @@ -1,470 +1,470 @@ -using System; -using System.Linq; -using NUnit.Framework; - -namespace Plankton.Test -{ - [TestFixture] - public class HalfedgeTest - { - [Test] - public void CanFindHalfedge() - { - // Create a mesh with a single quad face - PlanktonMesh pMesh = new PlanktonMesh(); - pMesh.Vertices.Add(0, 0, 0); - pMesh.Vertices.Add(1, 0, 0); - pMesh.Vertices.Add(1, 1, 0); - pMesh.Vertices.Add(0, 1, 0); - pMesh.Faces.AddFace(0, 1, 2, 3); - // Try and find some halfedges... - Assert.AreEqual(0, pMesh.Halfedges.FindHalfedge(0, 1)); - Assert.AreEqual(2, pMesh.Halfedges.FindHalfedge(1, 2)); - Assert.AreEqual(-1, pMesh.Halfedges.FindHalfedge(0, 2)); - } - - [Test] - public void CanFindHalfedgeUnusedVertices() - { - PlanktonMesh pMesh = new PlanktonMesh(); - pMesh.Vertices.Add(0, 0, 0); - pMesh.Vertices.Add(1, 1, 1); - // Check for halfedge between v0 and v1 - // In fact, both are unused so we shouldn't find one - Assert.AreEqual(-1, pMesh.Halfedges.FindHalfedge(0, 1)); - } - - [Test] - public void CanFlipEdge() - { - // Create a triangulated grid and flip one of the edges. - // - // Before >>> After - // - // 2---5---8 2---5- - // |\ | /| | /| - // | \ | / | | / | - // | \|/ | |/ |/ - // 1---4---7 1---4- - // | /|\ | | /|\ - // | / | \ | - // |/ | \| (etc.) - // 0---3---6 - // - - PlanktonMesh pMesh = new PlanktonMesh(); - - pMesh.Vertices.Add(-0.5, -0.5, 0.0); // 0 - pMesh.Vertices.Add(-0.5, 0.0, 0.0); // 1 - pMesh.Vertices.Add(-0.5, 0.5, 0.0); // 2 - pMesh.Vertices.Add(0.0, -0.5, 0.0); // 3 - pMesh.Vertices.Add(0.0, 0.0, 0.0); // 4 - pMesh.Vertices.Add(0.0, 0.5, 0.0); // 5 - pMesh.Vertices.Add(0.5, -0.5, 0.0); // 6 - pMesh.Vertices.Add(0.5, 0.0, 0.0); // 7 - pMesh.Vertices.Add(0.5, 0.5, 0.0); // 8 - - pMesh.Faces.AddFace(4, 1, 0); // 0 - pMesh.Faces.AddFace(4, 0, 3); // 1 - pMesh.Faces.AddFace(4, 3, 6); // 2 - pMesh.Faces.AddFace(4, 6, 7); // 3 - pMesh.Faces.AddFace(4, 7, 8); // 4 - pMesh.Faces.AddFace(4, 8, 5); // 5 - pMesh.Faces.AddFace(4, 5, 2); // 6 - pMesh.Faces.AddFace(4, 2, 1); // 7 - - // Find the outgoing halfedge of Vertex #4 (center) - - int he = pMesh.Vertices[4].OutgoingHalfedge; - - Assert.AreEqual(29, he); - - Assert.IsTrue(pMesh.Halfedges.FlipEdge(he)); - - // Check vertices for each face - Assert.AreEqual(new int[]{ 1, 5, 2 }, pMesh.Faces.GetFaceVertices(6)); - Assert.AreEqual(new int[]{ 5, 1, 4 }, pMesh.Faces.GetFaceVertices(7)); - - // Check outgoing he of Vertex #4 has been updated - he = pMesh.Vertices[4].OutgoingHalfedge; - Assert.AreNotEqual(29, he, "Vertex #4 should not be linked to Halfedge #29 post-flip"); - Assert.AreEqual(25, he); - - // Check adjacent face in each interior halfedge is correct - foreach (int h in pMesh.Faces.GetHalfedges(0)) - { - Assert.AreEqual(0, pMesh.Halfedges[h].AdjacentFace); - } - foreach (int h in pMesh.Faces.GetHalfedges(1)) - { - Assert.AreEqual(1, pMesh.Halfedges[h].AdjacentFace); - } - - // Check halfedges for each vertex - if (pMesh.Vertices.GetHalfedges(4).Contains(29)) - Assert.Fail("Vertex #4 should not be linked to Halfedge #29 post-flip"); - if (pMesh.Vertices.GetHalfedges(2).Contains(28)) - Assert.Fail("Vertex #2 should not be linked to 
Halfedge #28 post-flip"); - Assert.Contains(29, pMesh.Vertices.GetHalfedges(5), - "Vertex #5 should now be linked to Halfedge #29"); - Assert.Contains(28, pMesh.Vertices.GetHalfedges(1), - "Vertex #1 should now be linked to Halfedge #28"); - } - - [Test] - public void CanSplitEdge() - { - PlanktonMesh pMesh = new PlanktonMesh(); - var hs = pMesh.Halfedges; - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create two triangular faces - pMesh.Faces.AddFace(0, 1, 2); - pMesh.Faces.AddFace(2, 3, 0); - - // Change outgoing of vert #2 so that we can check it updates - pMesh.Vertices[2].OutgoingHalfedge = 4; - - // Split the diagonal edge - int split_he = 5; // he from v #0 to #2 - int new_he = hs.SplitEdge(split_he); - - // Returned halfedge should start at the new vertex - Assert.AreEqual(4, hs[new_he].StartVertex); - - // Check that the 4 halfedges are all in the right places... - // New ones are between new vertex and second vertex - Assert.AreEqual(new_he, hs.FindHalfedge(4, 2)); - Assert.AreEqual(hs.GetPairHalfedge(new_he), hs.FindHalfedge(2, 4)); - // Existing ones are now between first vertex and new vertex - Assert.AreEqual(split_he, hs.FindHalfedge(0, 4)); - Assert.AreEqual(hs.GetPairHalfedge(split_he), hs.FindHalfedge(4, 0)); - - // New halfedges should have the same faces as the existing ones next to them - Assert.AreEqual(hs[split_he].AdjacentFace, hs[new_he].AdjacentFace); - Assert.AreEqual(hs[hs.GetPairHalfedge(split_he)].AdjacentFace, - hs[hs.GetPairHalfedge(new_he)].AdjacentFace); - - // New vertex's outgoing should be returned halfedge - Assert.AreEqual(new_he, pMesh.Vertices[4].OutgoingHalfedge); - - // New vertex should be 2-valent - Assert.AreEqual(2, pMesh.Vertices.GetHalfedges(4).Length); - - // Check existing vertices... 
- Assert.AreEqual(new int[] {9, 5, 0}, pMesh.Vertices.GetHalfedges(0)); - Assert.AreEqual(new int[] {11, 6, 3}, pMesh.Vertices.GetHalfedges(2)); - } - - [Test] - public void CanCollapseBoundaryEdge() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create one vertex for each corner of a square - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - - // Create one quadrangular face - pMesh.Faces.AddFace(0, 1, 2, 3); - - int h_clps = 0; - int h_clps_prev = pMesh.Halfedges[h_clps].PrevHalfedge; - - // Confirm face's first halfedge is the one to be collapsed - Assert.AreEqual(h_clps, pMesh.Faces[0].FirstHalfedge); - - // Collapse edge - int h_rtn = pMesh.Halfedges.CollapseEdge(h_clps); - - // Edge collapse should return successor around start vertex - Assert.AreEqual(7, h_rtn); - - // Check face's first halfedge was updated - Assert.AreNotEqual(h_clps, pMesh.Faces[0].FirstHalfedge); - - // Check for closed loop (without collapsed halfedge) - Assert.AreEqual(new int[] { 2, 4, 6 }, pMesh.Faces.GetHalfedges(0)); - - // Pair of predecessor to collapsed halfedge should now have its start vertex - int h_clps_prev_pair = pMesh.Halfedges.GetPairHalfedge(h_clps_prev); - Assert.AreEqual(0, pMesh.Halfedges[h_clps_prev_pair].StartVertex); - } - - [Test] - public void CanCollapseInternalEdge() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create a three-by-three grid of vertices - pMesh.Vertices.Add(-0.5, -0.5, 0.0); // 0 - pMesh.Vertices.Add(-0.5, 0.0, 0.0); // 1 - pMesh.Vertices.Add(-0.5, 0.5, 0.0); // 2 - pMesh.Vertices.Add(0.0, -0.5, 0.0); // 3 - pMesh.Vertices.Add(0.0, 0.0, 0.0); // 4 - pMesh.Vertices.Add(0.0, 0.5, 0.0); // 5 - pMesh.Vertices.Add(0.5, -0.5, 0.0); // 6 - pMesh.Vertices.Add(0.5, 0.0, 0.0); // 7 - pMesh.Vertices.Add(0.5, 0.5, 0.0); // 8 - - // Create four quadrangular faces - pMesh.Faces.AddFace(1, 4, 5, 2); - pMesh.Faces.AddFace(0, 3, 4, 1); - pMesh.Faces.AddFace(4, 7, 8, 5); - pMesh.Faces.AddFace(3, 6, 7, 4); - - Assert.AreEqual(4, pMesh.Faces.Count); - - int h_clps = pMesh.Vertices[4].OutgoingHalfedge; - int v_suc = pMesh.Vertices.GetHalfedges(4)[1]; - int h_boundary = pMesh.Vertices[3].OutgoingHalfedge; - - // Collapse center vertex's outgoing halfedge - int h_rtn = pMesh.Halfedges.CollapseEdge(h_clps); - - // Check that center vertex's outgoing halfedge has been updated - Assert.AreEqual(h_boundary, pMesh.Vertices[4].OutgoingHalfedge); - - // Edge collapse should return successor around start vertex - Assert.AreEqual(v_suc, h_rtn); - - // Check for closed loops (without collapsed halfedge) - Assert.AreEqual(4, pMesh.Faces.GetHalfedges(0).Length); - Assert.AreEqual(3, pMesh.Faces.GetHalfedges(1).Length); - Assert.AreEqual(4, pMesh.Faces.GetHalfedges(2).Length); - Assert.AreEqual(3, pMesh.Faces.GetHalfedges(3).Length); - - // Check no halfedges reference removed vertex (#7) - for (int h = 0; h < pMesh.Halfedges.Count; h++) - { - if (h == h_clps || h == pMesh.Halfedges.GetPairHalfedge(h_clps)) - continue; // Skip removed halfedges - Assert.AreNotEqual(3, pMesh.Halfedges[h].StartVertex); - } - } - - [Test] - public void CannotCollapseNonManifoldVertex() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create vertices in 3x2 grid - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - pMesh.Vertices.Add(2, 0, 0); // 4 - pMesh.Vertices.Add(2, 1, 0); // 5 - - // Create two quadrangular faces - 
pMesh.Faces.AddFace(0, 1, 2, 3); - pMesh.Faces.AddFace(1, 4, 5, 2); - - // Try to collapse edge between vertices #1 and #2 - // (which would make vertex #1 non-manifold) - int h = pMesh.Halfedges.FindHalfedge(1, 2); - Assert.AreEqual(-1, pMesh.Halfedges.CollapseEdge(h)); - - // That's right, you can't! - } - - [Test] - public void CanCollapseAdjacentTriangles() - { - // TODO: draw figure here... - - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create several vertices - pMesh.Vertices.Add(0, 3, 0); // 0 - pMesh.Vertices.Add(0, 2, 0); // 1 - pMesh.Vertices.Add(0, 1, 0); // 2 - pMesh.Vertices.Add(1, 3, 0); // 3 - pMesh.Vertices.Add(1, 2, 0); // 4 - pMesh.Vertices.Add(1, 1, 0); // 5 - pMesh.Vertices.Add(1, 0, 0); // 6 - pMesh.Vertices.Add(2, 2, 0); // 7 - pMesh.Vertices.Add(2, 1, 0); // 8 - - // Create several faces - pMesh.Faces.AddFace(0, 1, 4, 3); // 0 - pMesh.Faces.AddFace(1, 2, 5, 4); // 1 - pMesh.Faces.AddFace(3, 4, 7); // 2 - pMesh.Faces.AddFace(4, 5, 7); // 3 - pMesh.Faces.AddFace(7, 5, 6, 8); // 4 - - // Try to collapse edge between vertices #4 and #7 - int h_clps = pMesh.Halfedges.FindHalfedge(4, 7); - //int v_keep = pMesh.Halfedges[h_clps].StartVertex; - int h_succ = pMesh.Halfedges.GetVertexCirculator(h_clps).ElementAt(1); - Assert.AreEqual(h_succ, pMesh.Halfedges.CollapseEdge(h_clps)); - - // Successor to h (around h's start vertex) should now be adjacent to face #4 - Assert.AreEqual(4, pMesh.Halfedges[h_succ].AdjacentFace); - - // Check new vertices of face #4 - Assert.AreEqual(new int[] { 5, 6, 8, 4 }, pMesh.Faces.GetFaceVertices(4)); - - // Traverse around mesh boundary and count halfedges - int count, he_first, he_current; - count = 0; - he_first = 1; - he_current = he_first; - do - { - count++; - he_current = pMesh.Halfedges[he_current].NextHalfedge; - } - while (he_current != he_first); - - Assert.AreEqual(8, count); - - Assert.IsTrue(pMesh.Faces[2].IsUnused && pMesh.Faces[3].IsUnused); - - Assert.AreEqual(25, pMesh.Halfedges[5].NextHalfedge); - Assert.AreEqual(5, pMesh.Halfedges[25].PrevHalfedge); - - foreach (int i in new int[] { 14, 15, 16, 17, 18, 19 }) - { - Assert.IsTrue(pMesh.Halfedges[i].IsUnused); - } - } - - [Test] - public void CanCollapseValenceThreeVertex() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create mesh with one triangular face - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(2, 0, 0); // 1 - pMesh.Vertices.Add(1, 1.4, 0); // 2 - pMesh.Faces.AddFace(0, 1, 2); - - // create vertex at center and get a halfedge pointing *towards* it - int v = pMesh.Faces.Stellate(0); - int h = pMesh.Vertices.GetIncomingHalfedge(v); - - // count faces before collapse - Assert.AreEqual(3, pMesh.Faces.Count); - - // attempt to collapse one of the internal edges - Assert.GreaterOrEqual(0, pMesh.Halfedges.CollapseEdge(h)); - - // there should be 6 unused halfedges now... 
- Assert.AreEqual(6, pMesh.Halfedges.Where(q => q.IsUnused).Count()); - - // compact and count faces again - pMesh.Compact(); - Assert.AreEqual(1, pMesh.Faces.Count); - } - - [Test] - public void CannotCollapseNonManifoldEdge() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create vertices in 3x2 grid - pMesh.Vertices.Add(-1, 0, 0); // 0 - pMesh.Vertices.Add(0, -1, 0); // 1 - pMesh.Vertices.Add(1, 0, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - pMesh.Vertices.Add(-1, -2, 0); // 4 - pMesh.Vertices.Add(0, -3, 0); // 5 - pMesh.Vertices.Add(1, -2, 0); // 6 - - // Create several triangular faces - pMesh.Faces.AddFace(0, 1, 3); - pMesh.Faces.AddFace(1, 2, 3); - pMesh.Faces.AddFace(0, 4, 1); - pMesh.Faces.AddFace(4, 5, 1); - // And one quad face - pMesh.Faces.AddFace(1, 5, 6, 2); - - pMesh.Faces.Stellate(0); - pMesh.Faces.Stellate(1); - - Assert.AreEqual(9, pMesh.Faces.Count); - - Assert.AreEqual(-1, pMesh.Halfedges.CollapseEdge(2)); - Assert.AreEqual(-1, pMesh.Halfedges.CollapseEdge(6)); - } - - [Test] - public void CanCompact() - { - PlanktonMesh pMesh = new PlanktonMesh(); - - // Create vertices in 3x2 grid - pMesh.Vertices.Add(0, 0, 0); // 0 - pMesh.Vertices.Add(1, 0, 0); // 1 - pMesh.Vertices.Add(1, 1, 0); // 2 - pMesh.Vertices.Add(0, 1, 0); // 3 - pMesh.Vertices.Add(2, 0, 0); // 4 - pMesh.Vertices.Add(2, 1, 0); // 5 - - // Create two quadrangular faces - pMesh.Faces.AddFace(0, 1, 2, 3); - pMesh.Faces.AddFace(1, 4, 5, 2); - - // Remove the first face and compact - pMesh.Faces.RemoveFace(0); - pMesh.Halfedges.CompactHelper(); - - // Check some things about the compacted mesh - Assert.AreEqual(8, pMesh.Halfedges.Count); - Assert.AreEqual(new int[] { 1, 4, 5, 2 }, pMesh.Faces.GetFaceVertices(1)); - } - - [Test] - public void CannotTraverseUnusedHalfedge() - { - PlanktonMesh pMesh = new PlanktonMesh(); - pMesh.Halfedges.Add(PlanktonHalfedge.Unset); - pMesh.Halfedges.Add(PlanktonHalfedge.Unset); - - // You shouldn't be able to enumerate a circulator for either of these unset halfedges - Assert.Throws(() => pMesh.Halfedges.GetFaceCirculator(0).ToArray()); - Assert.Throws( - delegate { foreach (int h in pMesh.Halfedges.GetVertexCirculator(1)) {} } ); - } - - [TestCase(0)] - [TestCase(1)] - [TestCase(2)] - [TestCase(3)] - public void CanCollapseSameFace(int h) - { - // 3-------2 - // | f1 | Tries to collapse the halfedge... - // | | * 0 - from vertex 1 to vertex 4 - // | 4 | * 1 - from vertex 4 to vertex 1 - // | / \ | * 2 - from vertex 4 to vertex 0 - // |/ f0 \| * 3 - from vertex 0 to vertex 4 - // 0-------1 - - PlanktonMesh mesh = new PlanktonMesh(); - - mesh.Vertices.Add(0, 0, 0); // 0 - mesh.Vertices.Add(100, 0, 0); // 1 - mesh.Vertices.Add(100, 100, 0); // 2 - mesh.Vertices.Add(0, 100, 0); // 3 - mesh.Vertices.Add(50, 50, 0); // 4 - - mesh.Faces.AddFace(1, 4, 0); - mesh.Faces.AddFace(new int[] { 1, 2, 3, 0, 4 }); - - mesh.Halfedges.CollapseEdge(h); - - Assert.IsTrue(mesh.Faces[0].IsUnused, "face 0 should be unset"); - Assert.AreEqual(4, mesh.Faces.GetFaceVertices(1).Length, "face 1 should have 4 vertices"); - } - } -} +using System; +using System.Linq; +using NUnit.Framework; + +namespace Plankton.Test +{ + [TestFixture] + public class HalfedgeTest + { + [Test] + public void CanFindHalfedge() + { + // Create a mesh with a single quad face + PlanktonMesh pMesh = new PlanktonMesh(); + pMesh.Vertices.Add(0, 0, 0); + pMesh.Vertices.Add(1, 0, 0); + pMesh.Vertices.Add(1, 1, 0); + pMesh.Vertices.Add(0, 1, 0); + pMesh.Faces.AddFace(0, 1, 2, 3); + // Try and find some halfedges... 
+ Assert.AreEqual(0, pMesh.Halfedges.FindHalfedge(0, 1)); + Assert.AreEqual(2, pMesh.Halfedges.FindHalfedge(1, 2)); + Assert.AreEqual(-1, pMesh.Halfedges.FindHalfedge(0, 2)); + } + + [Test] + public void CanFindHalfedgeUnusedVertices() + { + PlanktonMesh pMesh = new PlanktonMesh(); + pMesh.Vertices.Add(0, 0, 0); + pMesh.Vertices.Add(1, 1, 1); + // Check for halfedge between v0 and v1 + // In fact, both are unused so we shouldn't find one + Assert.AreEqual(-1, pMesh.Halfedges.FindHalfedge(0, 1)); + } + + [Test] + public void CanFlipEdge() + { + // Create a triangulated grid and flip one of the edges. + // + // Before >>> After + // + // 2---5---8 2---5- + // |\ | /| | /| + // | \ | / | | / | + // | \|/ | |/ |/ + // 1---4---7 1---4- + // | /|\ | | /|\ + // | / | \ | + // |/ | \| (etc.) + // 0---3---6 + // + + PlanktonMesh pMesh = new PlanktonMesh(); + + pMesh.Vertices.Add(-0.5, -0.5, 0.0); // 0 + pMesh.Vertices.Add(-0.5, 0.0, 0.0); // 1 + pMesh.Vertices.Add(-0.5, 0.5, 0.0); // 2 + pMesh.Vertices.Add(0.0, -0.5, 0.0); // 3 + pMesh.Vertices.Add(0.0, 0.0, 0.0); // 4 + pMesh.Vertices.Add(0.0, 0.5, 0.0); // 5 + pMesh.Vertices.Add(0.5, -0.5, 0.0); // 6 + pMesh.Vertices.Add(0.5, 0.0, 0.0); // 7 + pMesh.Vertices.Add(0.5, 0.5, 0.0); // 8 + + pMesh.Faces.AddFace(4, 1, 0); // 0 + pMesh.Faces.AddFace(4, 0, 3); // 1 + pMesh.Faces.AddFace(4, 3, 6); // 2 + pMesh.Faces.AddFace(4, 6, 7); // 3 + pMesh.Faces.AddFace(4, 7, 8); // 4 + pMesh.Faces.AddFace(4, 8, 5); // 5 + pMesh.Faces.AddFace(4, 5, 2); // 6 + pMesh.Faces.AddFace(4, 2, 1); // 7 + + // Find the outgoing halfedge of Vertex #4 (center) + + int he = pMesh.Vertices[4].OutgoingHalfedge; + + Assert.AreEqual(29, he); + + Assert.IsTrue(pMesh.Halfedges.FlipEdge(he)); + + // Check vertices for each face + Assert.AreEqual(new int[]{ 1, 5, 2 }, pMesh.Faces.GetFaceVertices(6)); + Assert.AreEqual(new int[]{ 5, 1, 4 }, pMesh.Faces.GetFaceVertices(7)); + + // Check outgoing he of Vertex #4 has been updated + he = pMesh.Vertices[4].OutgoingHalfedge; + Assert.AreNotEqual(29, he, "Vertex #4 should not be linked to Halfedge #29 post-flip"); + Assert.AreEqual(25, he); + + // Check adjacent face in each interior halfedge is correct + foreach (int h in pMesh.Faces.GetHalfedges(0)) + { + Assert.AreEqual(0, pMesh.Halfedges[h].AdjacentFace); + } + foreach (int h in pMesh.Faces.GetHalfedges(1)) + { + Assert.AreEqual(1, pMesh.Halfedges[h].AdjacentFace); + } + + // Check halfedges for each vertex + if (pMesh.Vertices.GetHalfedges(4).Contains(29)) + Assert.Fail("Vertex #4 should not be linked to Halfedge #29 post-flip"); + if (pMesh.Vertices.GetHalfedges(2).Contains(28)) + Assert.Fail("Vertex #2 should not be linked to Halfedge #28 post-flip"); + Assert.Contains(29, pMesh.Vertices.GetHalfedges(5), + "Vertex #5 should now be linked to Halfedge #29"); + Assert.Contains(28, pMesh.Vertices.GetHalfedges(1), + "Vertex #1 should now be linked to Halfedge #28"); + } + + [Test] + public void CanSplitEdge() + { + PlanktonMesh pMesh = new PlanktonMesh(); + var hs = pMesh.Halfedges; + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create two triangular faces + pMesh.Faces.AddFace(0, 1, 2); + pMesh.Faces.AddFace(2, 3, 0); + + // Change outgoing of vert #2 so that we can check it updates + pMesh.Vertices[2].OutgoingHalfedge = 4; + + // Split the diagonal edge + int split_he = 5; // he from v #0 to #2 + int new_he = hs.SplitEdge(split_he); + 
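CanFlipEdge above exercises FlipEdge on an interior edge of a triangulated grid. A smaller, self-contained sketch of the same operation is given below: a unit square is split into two triangles and the shared diagonal is flipped. It relies only on calls already used in this suite (AddFace, FindHalfedge, FlipEdge), and the post-flip checks are our expectation for this particular configuration rather than assertions copied from the library.

    using NUnit.Framework;
    using Plankton;

    [TestFixture]
    public class FlipEdgeSketch
    {
        [Test]
        public void FlipsDiagonalOfTwoTriangles()
        {
            var mesh = new PlanktonMesh();
            mesh.Vertices.Add(0, 0, 0); // 0
            mesh.Vertices.Add(1, 0, 0); // 1
            mesh.Vertices.Add(1, 1, 0); // 2
            mesh.Vertices.Add(0, 1, 0); // 3
            mesh.Faces.AddFace(0, 1, 2);
            mesh.Faces.AddFace(2, 3, 0);

            // The shared diagonal runs between vertices 0 and 2.
            int diagonal = mesh.Halfedges.FindHalfedge(0, 2);
            Assert.IsTrue(mesh.Halfedges.FlipEdge(diagonal));

            // After the flip the diagonal should join vertices 1 and 3 instead.
            Assert.GreaterOrEqual(mesh.Halfedges.FindHalfedge(1, 3), 0);
            Assert.AreEqual(-1, mesh.Halfedges.FindHalfedge(0, 2));
        }
    }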
+ // Returned halfedge should start at the new vertex + Assert.AreEqual(4, hs[new_he].StartVertex); + + // Check that the 4 halfedges are all in the right places... + // New ones are between new vertex and second vertex + Assert.AreEqual(new_he, hs.FindHalfedge(4, 2)); + Assert.AreEqual(hs.GetPairHalfedge(new_he), hs.FindHalfedge(2, 4)); + // Existing ones are now between first vertex and new vertex + Assert.AreEqual(split_he, hs.FindHalfedge(0, 4)); + Assert.AreEqual(hs.GetPairHalfedge(split_he), hs.FindHalfedge(4, 0)); + + // New halfedges should have the same faces as the existing ones next to them + Assert.AreEqual(hs[split_he].AdjacentFace, hs[new_he].AdjacentFace); + Assert.AreEqual(hs[hs.GetPairHalfedge(split_he)].AdjacentFace, + hs[hs.GetPairHalfedge(new_he)].AdjacentFace); + + // New vertex's outgoing should be returned halfedge + Assert.AreEqual(new_he, pMesh.Vertices[4].OutgoingHalfedge); + + // New vertex should be 2-valent + Assert.AreEqual(2, pMesh.Vertices.GetHalfedges(4).Length); + + // Check existing vertices... + Assert.AreEqual(new int[] {9, 5, 0}, pMesh.Vertices.GetHalfedges(0)); + Assert.AreEqual(new int[] {11, 6, 3}, pMesh.Vertices.GetHalfedges(2)); + } + + [Test] + public void CanCollapseBoundaryEdge() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create one vertex for each corner of a square + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + + // Create one quadrangular face + pMesh.Faces.AddFace(0, 1, 2, 3); + + int h_clps = 0; + int h_clps_prev = pMesh.Halfedges[h_clps].PrevHalfedge; + + // Confirm face's first halfedge is the one to be collapsed + Assert.AreEqual(h_clps, pMesh.Faces[0].FirstHalfedge); + + // Collapse edge + int h_rtn = pMesh.Halfedges.CollapseEdge(h_clps); + + // Edge collapse should return successor around start vertex + Assert.AreEqual(7, h_rtn); + + // Check face's first halfedge was updated + Assert.AreNotEqual(h_clps, pMesh.Faces[0].FirstHalfedge); + + // Check for closed loop (without collapsed halfedge) + Assert.AreEqual(new int[] { 2, 4, 6 }, pMesh.Faces.GetHalfedges(0)); + + // Pair of predecessor to collapsed halfedge should now have its start vertex + int h_clps_prev_pair = pMesh.Halfedges.GetPairHalfedge(h_clps_prev); + Assert.AreEqual(0, pMesh.Halfedges[h_clps_prev_pair].StartVertex); + } + + [Test] + public void CanCollapseInternalEdge() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create a three-by-three grid of vertices + pMesh.Vertices.Add(-0.5, -0.5, 0.0); // 0 + pMesh.Vertices.Add(-0.5, 0.0, 0.0); // 1 + pMesh.Vertices.Add(-0.5, 0.5, 0.0); // 2 + pMesh.Vertices.Add(0.0, -0.5, 0.0); // 3 + pMesh.Vertices.Add(0.0, 0.0, 0.0); // 4 + pMesh.Vertices.Add(0.0, 0.5, 0.0); // 5 + pMesh.Vertices.Add(0.5, -0.5, 0.0); // 6 + pMesh.Vertices.Add(0.5, 0.0, 0.0); // 7 + pMesh.Vertices.Add(0.5, 0.5, 0.0); // 8 + + // Create four quadrangular faces + pMesh.Faces.AddFace(1, 4, 5, 2); + pMesh.Faces.AddFace(0, 3, 4, 1); + pMesh.Faces.AddFace(4, 7, 8, 5); + pMesh.Faces.AddFace(3, 6, 7, 4); + + Assert.AreEqual(4, pMesh.Faces.Count); + + int h_clps = pMesh.Vertices[4].OutgoingHalfedge; + int v_suc = pMesh.Vertices.GetHalfedges(4)[1]; + int h_boundary = pMesh.Vertices[3].OutgoingHalfedge; + + // Collapse center vertex's outgoing halfedge + int h_rtn = pMesh.Halfedges.CollapseEdge(h_clps); + + // Check that center vertex's outgoing halfedge has been updated + Assert.AreEqual(h_boundary, pMesh.Vertices[4].OutgoingHalfedge); + + // Edge collapse 
should return successor around start vertex + Assert.AreEqual(v_suc, h_rtn); + + // Check for closed loops (without collapsed halfedge) + Assert.AreEqual(4, pMesh.Faces.GetHalfedges(0).Length); + Assert.AreEqual(3, pMesh.Faces.GetHalfedges(1).Length); + Assert.AreEqual(4, pMesh.Faces.GetHalfedges(2).Length); + Assert.AreEqual(3, pMesh.Faces.GetHalfedges(3).Length); + + // Check no halfedges reference removed vertex (#7) + for (int h = 0; h < pMesh.Halfedges.Count; h++) + { + if (h == h_clps || h == pMesh.Halfedges.GetPairHalfedge(h_clps)) + continue; // Skip removed halfedges + Assert.AreNotEqual(3, pMesh.Halfedges[h].StartVertex); + } + } + + [Test] + public void CannotCollapseNonManifoldVertex() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create vertices in 3x2 grid + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + pMesh.Vertices.Add(2, 0, 0); // 4 + pMesh.Vertices.Add(2, 1, 0); // 5 + + // Create two quadrangular faces + pMesh.Faces.AddFace(0, 1, 2, 3); + pMesh.Faces.AddFace(1, 4, 5, 2); + + // Try to collapse edge between vertices #1 and #2 + // (which would make vertex #1 non-manifold) + int h = pMesh.Halfedges.FindHalfedge(1, 2); + Assert.AreEqual(-1, pMesh.Halfedges.CollapseEdge(h)); + + // That's right, you can't! + } + + [Test] + public void CanCollapseAdjacentTriangles() + { + // TODO: draw figure here... + + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create several vertices + pMesh.Vertices.Add(0, 3, 0); // 0 + pMesh.Vertices.Add(0, 2, 0); // 1 + pMesh.Vertices.Add(0, 1, 0); // 2 + pMesh.Vertices.Add(1, 3, 0); // 3 + pMesh.Vertices.Add(1, 2, 0); // 4 + pMesh.Vertices.Add(1, 1, 0); // 5 + pMesh.Vertices.Add(1, 0, 0); // 6 + pMesh.Vertices.Add(2, 2, 0); // 7 + pMesh.Vertices.Add(2, 1, 0); // 8 + + // Create several faces + pMesh.Faces.AddFace(0, 1, 4, 3); // 0 + pMesh.Faces.AddFace(1, 2, 5, 4); // 1 + pMesh.Faces.AddFace(3, 4, 7); // 2 + pMesh.Faces.AddFace(4, 5, 7); // 3 + pMesh.Faces.AddFace(7, 5, 6, 8); // 4 + + // Try to collapse edge between vertices #4 and #7 + int h_clps = pMesh.Halfedges.FindHalfedge(4, 7); + //int v_keep = pMesh.Halfedges[h_clps].StartVertex; + int h_succ = pMesh.Halfedges.GetVertexCirculator(h_clps).ElementAt(1); + Assert.AreEqual(h_succ, pMesh.Halfedges.CollapseEdge(h_clps)); + + // Successor to h (around h's start vertex) should now be adjacent to face #4 + Assert.AreEqual(4, pMesh.Halfedges[h_succ].AdjacentFace); + + // Check new vertices of face #4 + Assert.AreEqual(new int[] { 5, 6, 8, 4 }, pMesh.Faces.GetFaceVertices(4)); + + // Traverse around mesh boundary and count halfedges + int count, he_first, he_current; + count = 0; + he_first = 1; + he_current = he_first; + do + { + count++; + he_current = pMesh.Halfedges[he_current].NextHalfedge; + } + while (he_current != he_first); + + Assert.AreEqual(8, count); + + Assert.IsTrue(pMesh.Faces[2].IsUnused && pMesh.Faces[3].IsUnused); + + Assert.AreEqual(25, pMesh.Halfedges[5].NextHalfedge); + Assert.AreEqual(5, pMesh.Halfedges[25].PrevHalfedge); + + foreach (int i in new int[] { 14, 15, 16, 17, 18, 19 }) + { + Assert.IsTrue(pMesh.Halfedges[i].IsUnused); + } + } + + [Test] + public void CanCollapseValenceThreeVertex() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create mesh with one triangular face + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(2, 0, 0); // 1 + pMesh.Vertices.Add(1, 1.4, 0); // 2 + pMesh.Faces.AddFace(0, 1, 2); + + // create vertex at center and get a halfedge 
pointing *towards* it + int v = pMesh.Faces.Stellate(0); + int h = pMesh.Vertices.GetIncomingHalfedge(v); + + // count faces before collapse + Assert.AreEqual(3, pMesh.Faces.Count); + + // attempt to collapse one of the internal edges + Assert.GreaterOrEqual(0, pMesh.Halfedges.CollapseEdge(h)); + + // there should be 6 unused halfedges now... + Assert.AreEqual(6, pMesh.Halfedges.Where(q => q.IsUnused).Count()); + + // compact and count faces again + pMesh.Compact(); + Assert.AreEqual(1, pMesh.Faces.Count); + } + + [Test] + public void CannotCollapseNonManifoldEdge() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create vertices in 3x2 grid + pMesh.Vertices.Add(-1, 0, 0); // 0 + pMesh.Vertices.Add(0, -1, 0); // 1 + pMesh.Vertices.Add(1, 0, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + pMesh.Vertices.Add(-1, -2, 0); // 4 + pMesh.Vertices.Add(0, -3, 0); // 5 + pMesh.Vertices.Add(1, -2, 0); // 6 + + // Create several triangular faces + pMesh.Faces.AddFace(0, 1, 3); + pMesh.Faces.AddFace(1, 2, 3); + pMesh.Faces.AddFace(0, 4, 1); + pMesh.Faces.AddFace(4, 5, 1); + // And one quad face + pMesh.Faces.AddFace(1, 5, 6, 2); + + pMesh.Faces.Stellate(0); + pMesh.Faces.Stellate(1); + + Assert.AreEqual(9, pMesh.Faces.Count); + + Assert.AreEqual(-1, pMesh.Halfedges.CollapseEdge(2)); + Assert.AreEqual(-1, pMesh.Halfedges.CollapseEdge(6)); + } + + [Test] + public void CanCompact() + { + PlanktonMesh pMesh = new PlanktonMesh(); + + // Create vertices in 3x2 grid + pMesh.Vertices.Add(0, 0, 0); // 0 + pMesh.Vertices.Add(1, 0, 0); // 1 + pMesh.Vertices.Add(1, 1, 0); // 2 + pMesh.Vertices.Add(0, 1, 0); // 3 + pMesh.Vertices.Add(2, 0, 0); // 4 + pMesh.Vertices.Add(2, 1, 0); // 5 + + // Create two quadrangular faces + pMesh.Faces.AddFace(0, 1, 2, 3); + pMesh.Faces.AddFace(1, 4, 5, 2); + + // Remove the first face and compact + pMesh.Faces.RemoveFace(0); + pMesh.Halfedges.CompactHelper(); + + // Check some things about the compacted mesh + Assert.AreEqual(8, pMesh.Halfedges.Count); + Assert.AreEqual(new int[] { 1, 4, 5, 2 }, pMesh.Faces.GetFaceVertices(1)); + } + + [Test] + public void CannotTraverseUnusedHalfedge() + { + PlanktonMesh pMesh = new PlanktonMesh(); + pMesh.Halfedges.Add(PlanktonHalfedge.Unset); + pMesh.Halfedges.Add(PlanktonHalfedge.Unset); + + // You shouldn't be able to enumerate a circulator for either of these unset halfedges + Assert.Throws(() => pMesh.Halfedges.GetFaceCirculator(0).ToArray()); + Assert.Throws( + delegate { foreach (int h in pMesh.Halfedges.GetVertexCirculator(1)) {} } ); + } + + [TestCase(0)] + [TestCase(1)] + [TestCase(2)] + [TestCase(3)] + public void CanCollapseSameFace(int h) + { + // 3-------2 + // | f1 | Tries to collapse the halfedge... 
+ // | | * 0 - from vertex 1 to vertex 4 + // | 4 | * 1 - from vertex 4 to vertex 1 + // | / \ | * 2 - from vertex 4 to vertex 0 + // |/ f0 \| * 3 - from vertex 0 to vertex 4 + // 0-------1 + + PlanktonMesh mesh = new PlanktonMesh(); + + mesh.Vertices.Add(0, 0, 0); // 0 + mesh.Vertices.Add(100, 0, 0); // 1 + mesh.Vertices.Add(100, 100, 0); // 2 + mesh.Vertices.Add(0, 100, 0); // 3 + mesh.Vertices.Add(50, 50, 0); // 4 + + mesh.Faces.AddFace(1, 4, 0); + mesh.Faces.AddFace(new int[] { 1, 2, 3, 0, 4 }); + + mesh.Halfedges.CollapseEdge(h); + + Assert.IsTrue(mesh.Faces[0].IsUnused, "face 0 should be unset"); + Assert.AreEqual(4, mesh.Faces.GetFaceVertices(1).Length, "face 1 should have 4 vertices"); + } + } +} diff --git a/src/PlanktonTests/PlanktonTests.csproj b/src/PlanktonTests/PlanktonTests.csproj index d189822..03f5fdf 100644 --- a/src/PlanktonTests/PlanktonTests.csproj +++ b/src/PlanktonTests/PlanktonTests.csproj @@ -1,63 +1,74 @@ - - - - {8FC24017-EDF3-4747-B966-C29DBD18103F} - Debug - AnyCPU - Library - Plankton.Test - PlanktonTests - v4.0 - Properties - - - AnyCPU - - - ..\..\bin\Debug\ - True - Full - False - True - DEBUG;TRACE - - - ..\..\bin\Release\ - False - None - True - False - TRACE - - - - ..\..\lib\nunit.framework.dll - True - - - - 3.5 - - - - 3.5 - - - - - - - - - - - - {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F} - Plankton - - - - - - + + + + {8FC24017-EDF3-4747-B966-C29DBD18103F} + Debug + AnyCPU + Library + Plankton.Test + PlanktonTests + v4.5 + Properties + + + + AnyCPU + + + ..\..\bin\Debug\ + True + Full + False + True + DEBUG;TRACE + + + ..\..\bin\Release\ + False + None + True + False + TRACE + + + false + + + false + + + + ..\..\lib\nunit.framework.dll + True + + + False + ..\..\..\..\..\AppData\Roaming\Grasshopper\Libraries\PlanktonGh.dll + + + + 3.5 + + + + 3.5 + + + + + + + + + + + + {BDD288F7-C2E2-4C2A-B083-E4D4D21F528F} + Plankton + + + + + + \ No newline at end of file diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/net35/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/net35/MathNet.Numerics.dll new file mode 100644 index 0000000..0e1da92 Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/net35/MathNet.Numerics.dll differ diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/net35/MathNet.Numerics.xml b/src/packages/MathNet.Numerics.3.16.0/lib/net35/MathNet.Numerics.xml new file mode 100644 index 0000000..44d707a --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/lib/net35/MathNet.Numerics.xml @@ -0,0 +1,49790 @@ + + + + MathNet.Numerics + + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. 
Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. + Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. + + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. 
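The Combinatorics members described above fall into two groups: closed-form counting functions and Fisher-Yates based random selection. The sketch below shows the counting side and the textbook form of the shuffle the documentation refers to. Because the XML tags naming the members have been stripped from this excerpt, the method names (Combinations, CombinationsWithRepetition, Variations, VariationsWithRepetition, Permutations) are taken from memory of the MathNet.Numerics 3.x API and should be treated as assumptions.

    using System;
    using MathNet.Numerics;

    class CombinatoricsSketch
    {
        static void Main()
        {
            // Counting: how many ways to choose/arrange 2 of 5 items.
            Console.WriteLine(Combinatorics.Combinations(5, 2));               // 10  (order ignored, no repetition)
            Console.WriteLine(Combinatorics.CombinationsWithRepetition(5, 2)); // 15
            Console.WriteLine(Combinatorics.Variations(5, 2));                 // 20  (order matters, no repetition)
            Console.WriteLine(Combinatorics.VariationsWithRepetition(5, 2));   // 25
            Console.WriteLine(Combinatorics.Permutations(5));                  // 120

            // The random permutation helpers above are documented as
            // Fisher-Yates shuffles; this loop is the textbook form.
            var indices = new int[10];
            for (int i = 0; i < indices.Length; i++) indices[i] = i;
            var rng = new Random();
            for (int i = indices.Length - 1; i > 0; i--)
            {
                int j = rng.Next(i + 1); // uniform on [0, i]
                int tmp = indices[i]; indices[i] = indices[j]; indices[j] = tmp;
            }
        }
    }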
+ + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. + + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. 
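The Complex32 summary above includes a short inline example; a compilable version is sketched below. It uses only members that appear in this documentation (the constructor, FromPolarCoordinates, the arithmetic operators and ToString), and the comment values are illustrative rather than quoted output.

    using System;
    using MathNet.Numerics;

    class Complex32Sketch
    {
        static void Main()
        {
            var x = new Complex32(1f, 2f);
            var y = Complex32.FromPolarCoordinates(1f, (float)Math.PI); // roughly -1 + 0i

            var z = (x + y) / (x - y);
            Console.WriteLine(z); // printed in Cartesian form, per the ToString documentation
        }
    }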
+ + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. + + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. 
+ true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. + + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. + + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. 
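Division, conjugation and the multiplicative inverse documented here obey two easy identities that make handy sanity checks, sketched below. The static Conjugate and Reciprocal overloads used are the ones documented a little further down in this file; the tolerance is an arbitrary single-precision choice.

    using System;
    using MathNet.Numerics;

    class Complex32Identities
    {
        static void Main()
        {
            var z = new Complex32(3f, -4f);

            // z * conj(z) is real and equals |z|^2 (here 25).
            var zzbar = z * Complex32.Conjugate(z);
            Console.WriteLine("{0} + {1}i", zzbar.Real, zzbar.Imaginary);

            // z * (1/z) is approximately 1 + 0i; the division operator itself
            // uses Smith's algorithm internally, as noted above.
            var unit = z * Complex32.Reciprocal(z);
            bool ok = Math.Abs(unit.Real - 1f) < 1e-5f && Math.Abs(unit.Imaginary) < 1e-5f;
            Console.WriteLine(ok);
        }
    }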
+ + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. 
+ + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. 
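The static helpers documented in this stretch (Sqrt, Abs, Exp, Log, Pow and the trigonometric functions) mirror the System.Math naming. A short sketch using three of them follows; the commented results are mathematical expectations, not captured output.

    using System;
    using MathNet.Numerics;

    class Complex32StaticHelpers
    {
        static void Main()
        {
            // The square root of a negative real number is purely imaginary.
            var i = Complex32.Sqrt(new Complex32(-1f, 0f));
            Console.WriteLine("{0} + {1}i", i.Real, i.Imaginary); // ~0 + 1i

            // Exp and Log are inverses (up to rounding) for arguments whose
            // imaginary part lies in (-pi, pi].
            var z = new Complex32(0.5f, 1.25f);
            var roundTrip = Complex32.Log(Complex32.Exp(z));
            Console.WriteLine("{0} + {1}i", roundTrip.Real, roundTrip.Imaginary);

            // Abs returns the magnitude as a scalar.
            Console.WriteLine(Complex32.Abs(new Complex32(3f, 4f))); // 5
        }
    }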
+ + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + 64-bit double precision complex numbers class. + + + + The class Complex provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex structures + has two special constant values and + . + + + + Complex x = new Complex(1d, 2d); + Complex y = Complex.FromPolarCoordinates(1d, Math.Pi); + Complex z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new Complex instance + with real and imaginary numbers positive infinite. + + + + + Returns a new Complex instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex is zero, the Complex + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex + + + + Gets the magnitude (or absolute value) of a complex number. + + The magnitude of the current instance. + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. 
+ The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The complex numbers to add. + The double value to add. + + + Subtraction operator. Subtracts double value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The double value to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The double value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a double value. + The result of the subtraction. + The double vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. + The result of the multiplication. + The double value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. + The result of the multiplication. + The complex number to multiply. + The double value to multiply. + + + Division operator. Divides a complex number by another. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a double value by a complex number. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a double value. + The result of the division. + The dividend. + The divisor. + + + + A string representation of this complex number. + + + The string representation of this complex number. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string. + + + A format specification. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format provider. + + + An that supplies culture-specific formatting information. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string and format provider. + + + if the n, is not a number. + + + if s, is . + + + A format specification. + + + An that supplies culture-specific formatting information. + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. 
+ + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + A norm of this value. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + The value to compare with. + + + A norm of the difference between this and the other value. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex. + + The unsgined short value to convert. + The result of the conversion. 
+ + + + Implicit conversion of a real int to a Complex. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex. + + The double value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex to a . + + A with the same values as this Complex. + + + + Returns the additive inverse of a specified complex number. + + The result of the and components of the parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + A complex number. + The absolute value (or magnitude) of a complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a double-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A double-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. 
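The double-precision Complex documented here carries the same member set as System.Numerics.Complex, so the identity check below is written against that type; on the .NET 3.5 build that ships this documentation file, the equivalent calls on the bundled Complex type are expected to behave the same way, which is our assumption rather than something stated in this excerpt.

    using System;
    using System.Numerics;

    class ComplexTrigIdentity
    {
        static void Main()
        {
            // sin^2(z) + cos^2(z) == 1 also holds for complex arguments.
            var z = new Complex(1.0, 2.0);
            var lhs = Complex.Sin(z) * Complex.Sin(z) + Complex.Cos(z) * Complex.Cos(z);
            Console.WriteLine(lhs);                                    // ~ (1, 0)
            Console.WriteLine(Complex.Abs(lhs - Complex.One) < 1e-12); // True, up to rounding
        }
    }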
+ + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. + + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). + + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. 
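ComplexExtensions, introduced above, adds instance-style helpers to System.Numerics.Complex. The sketch below shows three of them; because the XML tags naming the methods are missing from this excerpt, the names MagnitudeSquared, Conjugate and SquareRoot are assumptions based on the MathNet.Numerics 3.x API and should be verified against the assembly.

    using System;
    using System.Numerics;
    using MathNet.Numerics; // assumed namespace of the ComplexExtensions methods

    class ComplexExtensionsSketch
    {
        static void Main()
        {
            var z = new Complex(3.0, 4.0);

            Console.WriteLine(z.MagnitudeSquared()); // 25
            Console.WriteLine(z.Conjugate());        // (3, -4)
            Console.WriteLine(z.SquareRoot());       // principal root, exactly (2, 1)
        }
    }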
+    [Predicates for infinite, real and real-nonnegative complex values; norms used to measure how
+    close a value is to zero and how close two values are to each other; parsing of complex numbers
+    from strings in the formats 'n', 'ni', 'n +/- ni', 'ni +/- n', 'n,n', 'n,ni', '(n,n)' and
+    '(n,ni)', with culture-specific format-provider overloads and TryParse variants (returning
+    Zero on failure) for both the double-precision Complex and the single-precision Complex32
+    types.]
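A sketch of the parsing routines, assuming Complex32.Parse/TryParse follow the usual .NET parse pattern; the accepted string formats are taken from the documentation above, but the exact overloads are an assumption:

    using System;
    using MathNet.Numerics;   // Complex32 lives in the MathNet.Numerics namespace

    class ComplexParseDemo
    {
        static void Main()
        {
            // Formats described above: 'n', 'ni', 'n +/- ni', '(n,n)', ...
            var a = Complex32.Parse("1.5 + 2.5i");
            Console.WriteLine(a);

            if (Complex32.TryParse("(3, 4)", out Complex32 b))
                Console.WriteLine(b);   // parsed successfully
            else
                Console.WriteLine("not a complex number");
        }
    }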
+    [Complex32 parsing and TryParse overloads, continued; the Constants class: e, log2(e), log10(e),
+    ln(2), ln(10), ln(pi), ln(2*pi)/2, 1/e, sqrt(e), sqrt(2), sqrt(3), sqrt(1/2), sqrt(3)/2, pi and
+    its common multiples and fractions, sqrt(pi), sqrt(2*pi), sqrt(2*pi*e), their logarithms,
+    reciprocals of pi and sqrt(pi), the degree-to-radian (pi/180) and grad-to-radian (pi/200)
+    factors, decibel-to-neper factors, the Catalan, Euler-Mascheroni, Glaisher and Khinchin
+    constants, the golden ratio, and the sizes in bytes of double, int, float and Complex.]
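A sketch using the Constants class; field names such as Constants.Degree and Constants.GoldenRatio are assumed from the descriptions above:

    using System;
    using MathNet.Numerics;

    class ConstantsDemo
    {
        static void Main()
        {
            // Degree is documented as pi/180, the factor converting degrees to radians.
            double radians = 90.0 * Constants.Degree;
            Console.WriteLine(radians);               // ~1.5708

            Console.WriteLine(Constants.Sqrt2);       // 1.4142...
            Console.WriteLine(Constants.GoldenRatio); // (1 + sqrt(5)) / 2
        }
    }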
+    [Physical constants (2007 CODATA unless noted otherwise): speed of light, magnetic permeability
+    and electric permittivity of vacuum, characteristic impedance of vacuum, Newtonian constant of
+    gravitation, Planck constant and reduced Planck constant, Planck mass, temperature, length and
+    time, elementary charge, magnetic flux quantum, conductance quantum, Josephson and von Klitzing
+    constants, Bohr and nuclear magnetons, fine structure constant, Rydberg constant, Bohr radius,
+    Hartree energy, quantum of circulation, Fermi coupling constant, weak mixing angle, Thomson
+    cross section (2002 CODATA), and mass, energy equivalent, molar mass, Compton wavelength,
+    magnetic moment, g-factor and gyromagnetic ratio for the electron, muon, tau, proton and
+    neutron.]
+    [Neutron gyromagnetic ratio, deuteron and helion mass, energy equivalent, molar mass and
+    magnetic moment, the Avogadro constant (2010 CODATA), and the SI prefix factors from yotta
+    (10^24) down to yocto (10^-24); the Control class for library-wide settings: provider
+    selection (a specifically configured provider, or the best available), distribution parameter
+    validation, thread-safe random number generators (roughly 2.5x slower than non-thread-safe
+    ones), an optional native provider search path, the linear algebra and Fourier transform
+    providers, the maximum number of parallel worker threads, the task scheduler, the native
+    block size, and the order/element-count thresholds above which operations are parallelized;
+    start of the numerical differentiation documentation.]
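A sketch of the Control settings; the member names MaxDegreeOfParallelism, CheckDistributionParameters and UseManaged are assumptions matching the descriptions above:

    using MathNet.Numerics;

    class ControlDemo
    {
        static void Main()
        {
            // Cap the number of parallel worker threads used by the library.
            Control.MaxDegreeOfParallelism = 2;

            // Skip the (potentially expensive) distribution parameter validation.
            Control.CheckDistributionParameters = false;

            // Fall back to the managed provider instead of a native one.
            Control.UseManaged();
        }
    }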
+    [The Differentiate facade: constructors for a NumericalDerivative with given or default points
+    and center; evaluate the derivative of a scalar univariate function at a given order, or create
+    a function handle for it; first and second derivatives and their function handles; partial and
+    first partial derivatives of multivariate and bivariate functions, again both as direct
+    evaluations and as function-handle factories; the FiniteDifferenceCoefficients class, which
+    computes finite difference coefficients from a Taylor series expansion for n points (forward,
+    central or backward depending on the chosen center) up to the maximum possible order n-1.]
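A sketch of the Differentiate facade described above, assuming static FirstDerivative and Derivative methods on MathNet.Numerics.Differentiate:

    using System;
    using MathNet.Numerics;

    class DifferentiateDemo
    {
        static void Main()
        {
            Func<double, double> f = x => x * x * x;   // f(x) = x^3

            // First derivative at x = 2: exact value is 3 * 2^2 = 12.
            Console.WriteLine(Differentiate.FirstDerivative(f, 2.0));

            // Second derivative at x = 2: exact value is 6 * 2 = 12.
            Console.WriteLine(Differentiate.Derivative(f, 2.0, 2));
        }
    }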
+    [FiniteDifferenceCoefficients, continued: the number of points (changing it recalculates the
+    coefficient table), the constructor, coefficients for a specified center and order, and the
+    rectangular array of coefficients for all orders at a center; the StepType enumeration
+    (Absolute, RelativeX, Relative) describing how the finite difference step size is scaled with
+    the function argument and coefficient order; the NumericalDerivative class: constructors
+    (default 3-point central difference, or a custom number of points and center), the step size
+    and base step size properties (a user-defined step size is rounded to a base-2 representable
+    number), the center location, the function evaluation counter, and the step type used for
+    computing finite differences.]
+    [NumericalDerivative evaluation methods: the derivative of equidistant points, the derivative
+    of a scalar univariate function (optionally reusing a known center value to save evaluations),
+    derivative function handles, partial derivatives of multivariate functions and of multivariate
+    function arrays, partial-derivative function handles, and mixed partial derivatives of
+    variable order for multivariate functions and function arrays, plus the corresponding
+    mixed-partial function-handle factory.]
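A sketch of the NumericalDerivative class itself, assuming EvaluateDerivative and EvaluatePartialDerivative match the descriptions above:

    using System;
    using MathNet.Numerics.Differentiation;

    class NumericalDerivativeDemo
    {
        static void Main()
        {
            // 5-point stencil centred on the middle point (central differences).
            var nd = new NumericalDerivative(5, 2);

            Func<double, double> f = Math.Sin;
            Console.WriteLine(nd.EvaluateDerivative(f, 0.0, 1));   // ~cos(0) = 1

            // Partial derivative of g(x, y) = x * y^2 with respect to y at (2, 3): 2 * 2 * 3 = 12.
            Func<double[], double> g = v => v[0] * v[1] * v[1];
            Console.WriteLine(nd.EvaluatePartialDerivative(g, new[] { 2.0, 3.0 }, 1, 1));
        }
    }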
+    [Mixed-partial function handles for multivariate vector functions and a method to reset the
+    evaluation counter; the NumericalHessian class (default 3-point central difference): the
+    evaluation counter, constructors, the Hessian of scalar univariate and multivariate functions
+    (only valid for Lipschitz continuous functions; the result is mirrored along the diagonal
+    since d2f/dxdy = d2f/dydx for continuously differentiable functions), and a counter reset;
+    the NumericalJacobian class: the evaluation counter, constructors, and the Jacobian of scalar
+    univariate and multivariate functions, optionally reusing the current function value at the
+    finite difference center.]
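A sketch of the Hessian and Jacobian helpers; the Evaluate signatures are assumed from the descriptions above:

    using System;
    using MathNet.Numerics.Differentiation;

    class HessianJacobianDemo
    {
        static void Main()
        {
            // f(x, y) = x^2 + 3 x y
            Func<double[], double> f = v => v[0] * v[0] + 3.0 * v[0] * v[1];
            var x = new[] { 1.0, 2.0 };

            double[,] hessian = new NumericalHessian().Evaluate(f, x);   // [[2, 3], [3, 0]]
            double[] gradient = new NumericalJacobian().Evaluate(f, x);  // [2x + 3y, 3x] = [8, 3]

            Console.WriteLine($"{hessian[0, 1]}  {gradient[0]}");
        }
    }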
+    [Jacobian of multivariate function arrays (optionally with a vector of current function values
+    at the finite difference center) and a counter reset; the Distance class, metrics measuring the
+    distance between two structures: sum of absolute difference (SAD), mean absolute error (MAE),
+    sum of squared difference (SSD), mean squared error (MSE), Euclidean, Manhattan, Chebyshev,
+    Minkowski, Canberra, Cosine, Hamming, Pearson (1 minus the Pearson correlation coefficient)
+    and Jaccard distances; start of the discrete univariate Bernoulli distribution, a distribution
+    over bits parameterized by the probability p of generating a one.]
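A sketch of the Distance metrics, assuming Euclidean, Manhattan, Chebyshev and Hamming are static methods on MathNet.Numerics.Distance taking double arrays:

    using System;
    using MathNet.Numerics;

    class DistanceDemo
    {
        static void Main()
        {
            var a = new[] { 1.0, 2.0, 3.0 };
            var b = new[] { 4.0, 6.0, 3.0 };

            Console.WriteLine(Distance.Euclidean(a, b));   // sqrt(3^2 + 4^2 + 0^2) = 5
            Console.WriteLine(Distance.Manhattan(a, b));   // 3 + 4 + 0 = 7
            Console.WriteLine(Distance.Chebyshev(a, b));   // max component difference = 4
            Console.WriteLine(Distance.Hamming(a, b));     // 2 positions differ
        }
    }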
+    [Bernoulli distribution: constructors (with an optional random source), string representation,
+    parameter validation (0 <= p <= 1), the probability of generating a one, the random number
+    generator, mean, standard deviation, variance, entropy, skewness, the smallest and largest
+    integers in the domain, mode, modes, median, and instance and static forms of the probability
+    mass function (PMF), log PMF and cumulative distribution function (CDF).]
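A sketch of Bernoulli usage; the constructor taking p and the static PMF/CDF helpers follow the descriptions above:

    using System;
    using MathNet.Numerics.Distributions;

    class BernoulliDemo
    {
        static void Main()
        {
            var bernoulli = new Bernoulli(0.3);        // P(X = 1) = 0.3

            Console.WriteLine(bernoulli.Mean);         // 0.3
            Console.WriteLine(bernoulli.Variance);     // 0.3 * 0.7 = 0.21
            Console.WriteLine(bernoulli.Sample());     // 0 or 1

            Console.WriteLine(Bernoulli.PMF(0.3, 1));  // 0.3
            Console.WriteLine(Bernoulli.CDF(0.3, 0));  // P(X <= 0) = 0.7
        }
    }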
+    [Bernoulli sampling: single samples, sample sequences and array-filling overloads, both on an
+    instance and as static methods with an explicit random source; the continuous univariate Beta
+    distribution, including its degenerate special cases for zero or infinite shape parameters,
+    constructors (with an optional random source), parameter validation (alpha >= 0, beta >= 0),
+    the shape parameters, random source, mean, variance, standard deviation, entropy, skewness,
+    mode, median, minimum, maximum, and the probability density function (PDF).]
+    [Beta distribution, continued: log density, CDF and inverse CDF (the quantile function,
+    documented as not an explicit implementation and therefore slow and unreliable), instance
+    sampling (single values, sequences and array-filling), sampling by drawing two Gamma variables
+    and normalizing, and static PDF, log PDF, CDF, inverse CDF and sampling overloads that take
+    the shape parameters explicitly.]
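A sketch of Beta usage, assuming the static PDF/CDF/InvCDF helpers described above:

    using System;
    using MathNet.Numerics.Distributions;

    class BetaDemo
    {
        static void Main()
        {
            var beta = new Beta(2.0, 5.0);
            Console.WriteLine(beta.Mean);                  // alpha / (alpha + beta) = 2/7
            Console.WriteLine(beta.Sample());              // a draw in [0, 1]

            Console.WriteLine(Beta.PDF(2.0, 5.0, 0.3));    // density at 0.3
            Console.WriteLine(Beta.CDF(2.0, 5.0, 0.3));    // P(X <= 0.3)
            Console.WriteLine(Beta.InvCDF(2.0, 5.0, 0.5)); // median (documented as slow)
        }
    }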
+    [Remaining static Beta sampling overloads (with and without an explicit random source); the
+    BetaScaled distribution: constructors taking shape parameters alpha > 0 and beta > 0, a
+    location mu and a scale sigma > 0 (with an optional random source), a PERT factory that builds
+    the distribution from minimum, maximum and most-likely values as used in risk analysis, string
+    representation, parameter validation, the usual properties (shape parameters, location, scale,
+    random source, mean, variance, standard deviation, entropy, skewness, mode, median, minimum,
+    maximum) and the probability density function.]
+    [BetaScaled, continued: log density, CDF and inverse CDF (quantile, documented as not explicit
+    and therefore slow and unreliable), instance sampling (single values, sequences and
+    array-filling), and static PDF, log PDF, CDF, inverse CDF and sampling overloads taking the
+    shape, location and scale parameters explicitly.]
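A sketch of the PERT usage; BetaScaled.PERT(min, max, mostLikely) is an assumption based on the factory and parameter order described above:

    using System;
    using MathNet.Numerics.Distributions;

    class BetaScaledDemo
    {
        static void Main()
        {
            // Expert forecast: minimum 10, maximum 25, most likely 15 (e.g. task duration in days).
            var pert = BetaScaled.PERT(10.0, 25.0, 15.0);   // assumed factory signature

            Console.WriteLine(pert.Mean);       // pulled towards the most likely value
            Console.WriteLine(pert.Sample());   // a draw in [10, 25]
        }
    }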
+    [Remaining static BetaScaled sampling overloads; the discrete univariate Binomial distribution,
+    parameterized by a success probability 0 <= p <= 1 and a number of trials n >= 0: constructors
+    (with an optional random source, throwing if p or n is out of range), string representation,
+    parameter validation, the success probability, number of trials, random source, mean, standard
+    deviation, variance, entropy, skewness, the smallest and largest integers in the domain, mode,
+    modes, median and the probability mass function.]
+    [Binomial, continued: log PMF and CDF in instance and static forms, unchecked sampling,
+    instance and static sampling of single values, sequences and arrays; the discrete univariate
+    Categorical distribution (sometimes called the Discrete distribution), parameterized by a
+    vector of non-negative ratios that does not have to be normalized to sum to one, with support
+    0..k where k is the length of the probability mass array minus one.]
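A sketch of Binomial usage following the parameter order documented above (probability first, then trial count):

    using System;
    using MathNet.Numerics.Distributions;

    class BinomialDemo
    {
        static void Main()
        {
            var binomial = new Binomial(0.5, 10);            // p = 0.5, n = 10 trials

            Console.WriteLine(binomial.Mean);                // n * p = 5
            Console.WriteLine(binomial.Sample());            // number of successes in 10 trials

            Console.WriteLine(Binomial.PMF(0.5, 10, 5));     // P(X = 5) ~ 0.246
            Console.WriteLine(Binomial.CDF(0.5, 10, 5));     // P(X <= 5) ~ 0.623
        }
    }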
+    [Categorical: constructors from a ratio array or from a histogram (with an optional random
+    source; the distribution is not updated when the histogram changes), string representation,
+    parameter validation (no negative ratios, non-zero sum), the probability mass vector, random
+    source, mean, standard deviation, variance, entropy, skewness and mode (documented as
+    throwing), the smallest and largest integers in the domain, median, and the probability mass,
+    log probability mass and cumulative distribution functions.]
+ + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. + + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. 
+ random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. + + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. 
Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. 
+ The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
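Stepping back to the Cauchy and Chi sections above (the Chi-Squared members continue below): both follow the same instance/static pattern. A short sketch under the same assumption of MathNet.Numerics 3.x; the static argument order (distribution parameters first, evaluation point last) is assumed from the library's usual convention.

using System;
using MathNet.Numerics.Distributions;

class CauchyChiSketch
{
    static void Main()
    {
        // Cauchy with location x0 = 0 and scale gamma = 1 (the "standard" Cauchy).
        var cauchy = new Cauchy(0.0, 1.0);
        Console.WriteLine(cauchy.Density(0.0));                          // PDF at 0
        Console.WriteLine(cauchy.CumulativeDistribution(0.0));           // CDF at 0 == 0.5
        Console.WriteLine(cauchy.InverseCumulativeDistribution(0.975));  // upper quantile

        // Static form: parameters first, then the evaluation point.
        Console.WriteLine(Cauchy.PDF(0.0, 1.0, 0.0));

        // Chi with k = 3 degrees of freedom.
        var chi = new Chi(3.0);
        Console.WriteLine(chi.Mean);
        Console.WriteLine(chi.CumulativeDistribution(1.5));
        Console.WriteLine(chi.Sample());
    }
}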
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. 
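Before the ContinuousUniform members below, a quick Chi-Squared sketch: since the section above documents an explicit InvCDF, a common use is looking up critical values. Assumes the MathNet.Numerics 3.x ChiSquared class.

using System;
using MathNet.Numerics.Distributions;

class ChiSquaredSketch
{
    static void Main()
    {
        // Chi-squared with k = 5 degrees of freedom.
        var chiSq = new ChiSquared(5.0);
        Console.WriteLine(chiSq.Mean);     // k
        Console.WriteLine(chiSq.Variance); // 2k

        // CDF and its inverse (quantile function), instance and static forms.
        double x = chiSq.InverseCumulativeDistribution(0.95); // 95% critical value
        Console.WriteLine(x);
        Console.WriteLine(ChiSquared.CDF(5.0, x));            // ~0.95
        Console.WriteLine(ChiSquared.InvCDF(5.0, 0.95));      // same critical value
    }
}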
+ + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . 
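A minimal sketch of the ContinuousUniform members listed above, instance and static forms (further static overloads continue below); same MathNet.Numerics 3.x assumption.

using System;
using MathNet.Numerics.Distributions;

class ContinuousUniformSketch
{
    static void Main()
    {
        // Uniform over [2, 10].
        var uniform = new ContinuousUniform(2.0, 10.0);
        Console.WriteLine(uniform.Mean);                        // 6
        Console.WriteLine(uniform.Density(5.0));                // 1/8 inside the support
        Console.WriteLine(uniform.CumulativeDistribution(5.0)); // 0.375

        // Static sampling without constructing an instance.
        var rng = new Random(1);
        Console.WriteLine(ContinuousUniform.Sample(rng, 2.0, 10.0));
    }
}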
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. 
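For the Conway-Maxwell-Poisson parameters and properties just listed (its PMF/CDF members continue below), a small construction sketch. This is hedged: the Lambda/Nu property names are assumed from the doc entries above, and as the remarks note, the mean and variance are computed approximately from a cached normalization constant.

using System;
using MathNet.Numerics.Distributions;

class CmpSketch
{
    static void Main()
    {
        // lambda = 2.0, nu = 1.0 reduces to an ordinary Poisson(2) per the summary above.
        var cmp = new ConwayMaxwellPoisson(2.0, 1.0);
        Console.WriteLine(cmp.Lambda); // property names assumed from the documentation above
        Console.WriteLine(cmp.Nu);
        Console.WriteLine(cmp.Mean);   // computed approximately

        // A more dispersed variant: nu < 1 gives heavier tails than the Poisson case.
        var dispersed = new ConwayMaxwellPoisson(2.0, 0.5);
        Console.WriteLine(dispersed.Sample());
    }
}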
+ + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. + + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. 
Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. + The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. 
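The Dirichlet section above is multivariate, so samples and densities are vector-valued; a minimal sketch assuming the MathNet.Numerics 3.x Dirichlet class (the DiscreteUniform members continue below).

using System;
using MathNet.Numerics.Distributions;

class DirichletSketch
{
    static void Main()
    {
        // Three-component Dirichlet with parameters alpha = (2, 3, 5).
        var dirichlet = new Dirichlet(new double[] { 2.0, 3.0, 5.0 });

        // Density at a point on the simplex (components sum to 1, as the remarks require).
        Console.WriteLine(dirichlet.Density(new double[] { 0.2, 0.3, 0.5 }));

        // One sample is a probability vector that sums to 1 (up to rounding).
        double[] sample = dirichlet.Sample();
        Console.WriteLine(string.Join(", ", sample));

        // Static form with an explicit random source.
        double[] second = Dirichlet.Sample(new Random(7), new double[] { 2.0, 3.0, 5.0 });
        Console.WriteLine(string.Join(", ", second));
    }
}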
+ + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. 
Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
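Rounding out the DiscreteUniform section above, plus the Erlang constructors just listed (the remaining Erlang members follow below), a short sketch. The integer-shape, shape/rate Erlang constructor is an assumption based on the parameter descriptions; everything else is the standard MathNet.Numerics 3.x surface.

using System;
using MathNet.Numerics.Distributions;

class DiscreteUniformErlangSketch
{
    static void Main()
    {
        // A fair six-sided die: inclusive bounds 1..6.
        var die = new DiscreteUniform(1, 6);
        Console.WriteLine(die.Probability(3)); // 1/6
        Console.WriteLine(die.Sample());

        // Static sampling with an explicit random source.
        var rng = new Random(3);
        Console.WriteLine(DiscreteUniform.Sample(rng, 1, 6));

        // Erlang with shape k = 3 and rate lambda = 2 (shape/rate constructor as documented above).
        var erlang = new Erlang(3, 2.0);
        Console.WriteLine(erlang.Mean); // k / lambda = 1.5
        Console.WriteLine(erlang.Sample());
    }
}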
+ + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. 
+ The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . 
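The Exponential members above are the simplest of the continuous set; a minimal sketch (static Sample/Samples overloads continue below). Note the rate parameterization, so Mean == 1/λ. Same MathNet.Numerics 3.x assumption.

using System;
using MathNet.Numerics.Distributions;

class ExponentialSketch
{
    static void Main()
    {
        // Rate lambda = 0.5, so the mean waiting time is 2.
        var exponential = new Exponential(0.5);
        Console.WriteLine(exponential.Mean);                               // 2
        Console.WriteLine(exponential.CumulativeDistribution(2.0));        // 1 - e^-1
        Console.WriteLine(exponential.InverseCumulativeDistribution(0.5)); // median = ln(2)/lambda

        // Static forms: rate first, then the evaluation point or probability.
        Console.WriteLine(Exponential.CDF(0.5, 2.0));
        Console.WriteLine(Exponential.InvCDF(0.5, 0.5));
        Console.WriteLine(Exponential.Sample(0.5));
    }
}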
+ + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. 
+ The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. 
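Given the warning above that the F-distribution's InvCDF is not an explicit implementation (hence slow and potentially unreliable), it is worth showing the CDF/InvCDF round trip explicitly. A minimal sketch assuming the MathNet.Numerics 3.x FisherSnedecor class; the Gamma members continue below.

using System;
using MathNet.Numerics.Distributions;

class FisherSnedecorSketch
{
    static void Main()
    {
        // F-distribution with d1 = 5 and d2 = 10 degrees of freedom.
        var f = new FisherSnedecor(5.0, 10.0);
        Console.WriteLine(f.Density(1.0));
        Console.WriteLine(f.CumulativeDistribution(1.0));

        // Quantile lookup; per the documentation above this is numerically inverted,
        // so expect it to be slower than the closed-form CDF.
        double critical = f.InverseCumulativeDistribution(0.95);
        Console.WriteLine(critical);
        Console.WriteLine(FisherSnedecor.CDF(5.0, 10.0, critical)); // ~0.95
    }
}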
+ + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. 
+ + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
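Because this Geometric implementation is defined over k = 1, 2, ... (it never generates 0), the instance PMF/CDF calls documented above behave as in the following sketch, assuming the same MathNet.Numerics API (parameter values are illustrative):

using System;
using MathNet.Numerics.Distributions;

static class GeometricExample
{
    static void Main()
    {
        var geometric = new Geometric(0.25);   // p = probability of success per trial

        // P(X = k) = p * (1 - p)^(k - 1) for k = 1, 2, ...
        Console.WriteLine(geometric.Probability(1));            // 0.25
        Console.WriteLine(geometric.Probability(3));            // 0.25 * 0.75^2 ≈ 0.1406

        // P(X ≤ 3) = 1 - (1 - p)^3
        Console.WriteLine(geometric.CumulativeDistribution(3)); // ≈ 0.5781
    }
}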
+ + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). 
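The Hypergeometric constructor documented above takes the population size (N), the number of successes within the population (K) and the number of draws without replacement (n). A sketch of the urn reading, reusing the standard discrete-distribution Probability/CumulativeDistribution calls described earlier for Geometric (values are illustrative; the argument order follows the parameter order documented above):

using System;
using MathNet.Numerics.Distributions;

static class HypergeometricExample
{
    static void Main()
    {
        // An urn with N = 50 items, K = 5 of them "successes", from which n = 10 are
        // drawn without replacement.
        var hyper = new Hypergeometric(50, 5, 10);

        // Probability of seeing exactly 2 successes among the 10 draws.
        Console.WriteLine(hyper.Probability(2));

        // Probability of at most 2 successes.
        Console.WriteLine(hyper.CumulativeDistribution(2));
    }
}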
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). 
+ The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. 
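The continuous and discrete interface blocks above are what let calling code stay agnostic of the concrete distribution. A small sketch, assuming these entries correspond to MathNet.Numerics' IContinuousDistribution interface (the Describe helper and the parameter values are illustrative, not part of the library):

using System;
using MathNet.Numerics.Distributions;

static class DistributionHelpers
{
    // Works for any continuous univariate distribution: Normal, Gamma, Laplace, Pareto, ...
    static void Describe(IContinuousDistribution d)
    {
        Console.WriteLine("support: [{0}, {1}]", d.Minimum, d.Maximum);
        Console.WriteLine("mode:    {0}", d.Mode);
        Console.WriteLine("pdf(1):  {0}", d.Density(1.0));
        Console.WriteLine("sample:  {0}", d.Sample());
    }

    static void Main()
    {
        Describe(new Gamma(2.0, 0.5));
        Describe(new Laplace(0.0, 1.0));
    }
}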
+ + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. 
+ Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. 
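The Laplace density quoted above, p(x) = exp(-|x - μ|/b) / (2b), can be checked directly against the instance and static density calls documented in this block; a minimal sketch with illustrative parameters:

using System;
using MathNet.Numerics.Distributions;

static class LaplaceExample
{
    static void Main()
    {
        double location = 1.0, scale = 2.0, x = 4.0;

        var laplace = new Laplace(location, scale);

        // Library value and the closed form from the class summary should agree.
        double viaLibrary = laplace.Density(x);
        double viaFormula = Math.Exp(-Math.Abs(x - location) / scale) / (2.0 * scale);
        Console.WriteLine("{0} vs {1}", viaLibrary, viaFormula);

        // Static equivalents, parameterized as (location, scale, x) per the entries above.
        Console.WriteLine(Laplace.PDF(location, scale, x));
        Console.WriteLine(Laplace.CDF(location, scale, x));
    }
}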
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. 
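The log-normal entries distinguish the log-scale and shape parameters (μ, σ of ln X) from the mean and variance of X itself, and also provide maximum-likelihood estimation from data. A sketch assuming the factory and property names (WithMeanVariance, Estimate, Mu, Sigma) match the MathNet.Numerics members these entries describe; the sample data is made up:

using System;
using MathNet.Numerics.Distributions;

static class LogNormalExample
{
    static void Main()
    {
        // Parameterized directly by the log-scale μ and shape σ of ln(X).
        var direct = new LogNormal(0.5, 0.25);

        // Parameterized by the desired mean and variance of X itself.
        var byMoments = LogNormal.WithMeanVariance(10.0, 4.0);
        Console.WriteLine("{0}, {1}", byMoments.Mean, byMoments.Variance); // ≈ 10, 4

        // Maximum-likelihood fit from observed samples (cf. MATLAB lognfit).
        var data = new[] { 1.2, 0.9, 3.4, 2.2, 1.7, 0.8 };
        var fitted = LogNormal.Estimate(data);
        Console.WriteLine("{0}, {1}", fitted.Mu, fitted.Sigma);
    }
}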
+ + + + + Gets the maximum of the log-normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the density at . + + MATLAB: lognpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: logncdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: logninv + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. 
Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Multivariate Matrix-valued Normal distributions. The distribution + is parameterized by a mean matrix (M), a covariance matrix for the rows (V) and a covariance matrix + for the columns (K). If the dimension of M is d-by-m then V is d-by-d and K is m-by-m. + Wikipedia - MatrixNormal distribution. + + + + + The mean of the matrix normal distribution. + + + + + The covariance matrix for the rows. + + + + + The covariance matrix for the columns. + + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + The random number generator which is used to draw random samples. + If the dimensions of the mean and two covariance matrices don't match. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + + + + Gets the mean. (M) + + The mean of the distribution. + + + + Gets the row covariance. (V) + + The row covariance. + + + + Gets the column covariance. (K) + + The column covariance. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Evaluates the probability density function for the matrix normal distribution. + + The matrix at which to evaluate the density at. + the density at + If the argument does not have the correct dimensions. + + + + Samples a matrix normal distributed random variable. + + A random number from this distribution. + + + + Samples a matrix normal distributed random variable. + + The random number generator to use. + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + a sequence of samples from the distribution. + + + + Samples a vector normal distributed random variable. + + The random number generator to use. + The mean of the vector normal distribution. + The covariance matrix of the vector normal distribution. + a sequence of samples from defined distribution. + + + + Multivariate Multinomial distribution. For details about this distribution, see + Wikipedia - Multinomial distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + + + Stores the normalized multinomial probabilities. + + + + + The number of trials. + + + + + Initializes a new instance of the Multinomial class. 
+ + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class from histogram . The distribution will + not be automatically updated when the histogram changes. + + Histogram instance + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative returns false, + if the sum of parameters is 0.0, or if the number of trials is negative; otherwise true. + + + + Gets the proportion of ratios. + + + + + Gets the number of trials. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Computes values of the probability mass function. + + Non-negative integers x1, ..., xk + The probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Computes values of the log probability mass function. + + Non-negative integers x1, ..., xk + The log probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Samples one multinomial distributed random variable. + + the counts for each of the different possible values. + + + + Samples a sequence multinomially distributed random variables. + + a sequence of counts for each of the different possible values. + + + + Samples one multinomial distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + the counts for each of the different possible values. + + + + Samples a multinomially distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of variables needed. + a sequence of counts for each of the different possible values. + + + + Discrete Univariate Negative Binomial distribution. + The negative binomial is a distribution over the natural numbers with two parameters r, p. For the special + case that r is an integer one can interpret the distribution as the number of failures before the r'th success + when the probability of success is p. + Wikipedia - NegativeBinomial distribution. + + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. 
Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Gets the number of successes. Range: r ≥ 0. + + + + + Gets the probability of success. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Samples a negative binomial distributed random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + a sample from the distribution. + + + + Samples a NegativeBinomial distributed random variable. + + a sample from the distribution. 
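For the negative binomial entries above (the number of failures before the r-th success when r is an integer), a short sketch of the instance calls just documented, with illustrative parameters:

using System;
using MathNet.Numerics.Distributions;

static class NegativeBinomialExample
{
    static void Main()
    {
        // r = 3 required successes, p = 0.4 success probability per trial.
        var negBinomial = new NegativeBinomial(3.0, 0.4);

        // P(X = 2): probability of exactly 2 failures before the 3rd success.
        Console.WriteLine(negBinomial.Probability(2));

        // P(X ≤ 2)
        Console.WriteLine(negBinomial.CumulativeDistribution(2));

        // One draw from the distribution.
        Console.WriteLine(negBinomial.Sample());
    }
}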
+ + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of NegativeBinomial distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Continuous Univariate Normal distribution, also known as Gaussian distribution. + For details about this distribution, see + Wikipedia - Normal distribution. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a normal distribution from a mean and standard deviation. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + a normal distribution. + + + + Constructs a normal distribution from a mean and variance. + + The mean (μ) of the normal distribution. + The variance (σ^2) of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. 
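The normal-distribution constructors above take a standard deviation, while the factories take a variance or precision instead. A quick sketch assuming the MathNet.Numerics 3.x API these entries describe (System.Linq is only used to truncate the infinite Samples() sequence; values are illustrative):

using System;
using System.Linq;
using MathNet.Numerics.Distributions;

static class NormalExample
{
    static void Main()
    {
        // Standard normal N(0, 1) and an N(5, σ = 2) built directly from the standard deviation.
        var standard = new Normal();
        var direct = new Normal(5.0, 2.0);

        // The same N(5, σ = 2) built from the variance σ² = 4 instead.
        var byVariance = Normal.WithMeanVariance(5.0, 4.0);
        Console.WriteLine(byVariance.StdDev); // 2

        // Samples() is a lazy, infinite sequence; take a finite prefix.
        double[] draws = direct.Samples().Take(1000).ToArray();
        Console.WriteLine(draws.Average());                      // ≈ 5
        Console.WriteLine(standard.CumulativeDistribution(0.0)); // 0.5
    }
}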
+ + + + Constructs a normal distribution from a mean and precision. + + The mean (μ) of the normal distribution. + The precision of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Estimates the normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + MATLAB: normfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Gets the mean (μ) of the normal distribution. + + + + + Gets the standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + + Gets the variance of the normal distribution. + + + + + Gets the precision of the normal distribution. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the entropy of the normal distribution. + + + + + Gets the skewness of the normal distribution. + + + + + Gets the mode of the normal distribution. + + + + + Gets the median of the normal distribution. + + + + + Gets the minimum of the normal distribution. + + + + + Gets the maximum of the normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the density at . + + MATLAB: normpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The mean (μ) of the normal distribution. 
+ The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: normcdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: norminv + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + This structure represents the type over which the distribution + is defined. + + + + + The mean value. + + + + + The precision value. + + + + + Initializes a new instance of the struct. + + The mean of the pair. + The precision of the pair. + + + + Gets or sets the mean of the pair. + + + + + Gets or sets the precision of the pair. + + + + + Multivariate Normal-Gamma Distribution. + The distribution is the conjugate prior distribution for the + distribution. It specifies a prior over the mean and precision of the distribution. + It is parameterized by four numbers: the mean location, the mean scale, the precision shape and the + precision inverse scale. + The distribution NG(mu, tau | mloc,mscale,psscale,pinvscale) = Normal(mu | mloc, 1/(mscale*tau)) * Gamma(tau | psscale,pinvscale). + The following degenerate cases are special: when the precision is known, + the precision shape will encode the value of the precision while the precision inverse scale is positive + infinity. When the mean is known, the mean location will encode the value of the mean while the scale + will be positive infinity. A completely degenerate NormalGamma distribution with known mean and precision is possible as well. + Wikipedia - Normal-Gamma distribution. + + + + + Initializes a new instance of the class. 
+ + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Gets the location of the mean. + + + + + Gets the scale of the mean. + + + + + Gets the shape of the precision. + + + + + Gets the inverse scale of the precision. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Returns the marginal distribution for the mean of the NormalGamma distribution. + + the marginal distribution for the mean of the NormalGamma distribution. + + + + Returns the marginal distribution for the precision of the distribution. + + The marginal distribution for the precision of the distribution/ + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the variance of the distribution. + + The mean of the distribution. + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + Density value + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + Density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + The log of the density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + The log of the density value + + + + Generates a sample from the NormalGamma distribution. + + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + a sequence of samples from the distribution. + + + + Generates a sample from the NormalGamma distribution. + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sequence of samples from the distribution. + + + + Continuous Univariate Pareto distribution. + The Pareto distribution is a power law probability distribution that coincides with social, + scientific, geophysical, actuarial, and many other types of observable phenomena. + For details about this distribution, see + Wikipedia - Pareto distribution. + + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + If or are negative. + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The random number generator which is used to draw random samples. 
+ If or are negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + + + + Gets the scale (xm) of the distribution. Range: xm > 0. + + + + + Gets the shape (α) of the distribution. Range: α > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Pareto distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. 
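A short sketch of the Pareto members just listed (scale xm, shape α). The static helpers are assumed to take the distribution parameters first and x (or the probability) last, as elsewhere in Math.NET Numerics:

using System;
using MathNet.Numerics.Distributions;

class ParetoSketch
{
    static void Main()
    {
        var pareto = new Pareto(1.0, 3.0);                     // xm = 1, α = 3
        Console.WriteLine(pareto.Mean);                         // α·xm/(α-1) = 1.5
        Console.WriteLine(pareto.CumulativeDistribution(2.0));  // 1 - (xm/x)^α = 0.875

        // Static equivalents (assumed order: scale, shape, then x or p).
        double pdf = Pareto.PDF(1.0, 3.0, 2.0);
        double x90 = Pareto.InvCDF(1.0, 3.0, 0.9);

        var rng = new Random(1);
        double draw = Pareto.Sample(rng, 1.0, 3.0);
        Console.WriteLine($"pdf={pdf:F4}, 90th percentile={x90:F4}, draw={draw:F4}");
    }
}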
+ The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Poisson distribution. + + + Distribution is described at Wikipedia - Poisson distribution. + Knuth's method is used to generate Poisson distributed random variables. + f(x) = exp(-λ)*λ^x/x!; + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + If is equal or less then 0.0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + If is equal or less then 0.0. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + + + + Gets the Poisson distribution parameter λ. Range: λ > 0. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. 
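For the Poisson members above, a minimal sketch using the instance API (Probability, ProbabilityLn, CumulativeDistribution and the samplers), so no assumption about static parameter order is needed:

using System;
using MathNet.Numerics.Distributions;

class PoissonSketch
{
    static void Main()
    {
        var poisson = new Poisson(4.0);                 // λ = 4, λ > 0

        double pmf = poisson.Probability(2);            // P(X = 2) = e^-4 · 4^2 / 2!
        double lnPmf = poisson.ProbabilityLn(2);        // ln P(X = 2)
        double cdf = poisson.CumulativeDistribution(3); // P(X ≤ 3)

        // Sampling uses Knuth's method or rejection method PA, per the member summaries above.
        int draw = poisson.Sample();
        int[] counts = new int[100];
        poisson.Samples(counts);                        // fill-array overload documented above

        Console.WriteLine($"PMF={pmf:F4}, lnPMF={lnPmf:F4}, CDF={cdf:F4}, draw={draw}");
    }
}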
+ the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Generates one sample from the Poisson distribution. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by Knuth's method. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by "Rejection method PA". + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson, + Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) + The article is on pages 29-35. The algorithm given here is on page 32. + + + + Samples a Poisson distributed random variable. + + A sample from the Poisson distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Poisson distributed random variables. + + a sequence of successes in N trials. + + + + Samples a Poisson distributed random variable. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Samples a Poisson distributed random variable. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Rayleigh distribution. + The Rayleigh distribution (pronounced /ˈreɪli/) is a continuous probability distribution. 
As an + example of how it arises, the wind speed will have a Rayleigh distribution if the components of + the two-dimensional wind velocity vector are uncorrelated and normally distributed with equal variance. + For details about this distribution, see + Wikipedia - Rayleigh distribution. + + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + If is negative. + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the scale (σ) of the distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Rayleigh distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (σ) of the distribution. Range: σ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. 
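A sketch exercising the Rayleigh members listed above (scale σ); the wind-speed remark in the class summary is a typical use case:

using System;
using MathNet.Numerics.Distributions;

class RayleighSketch
{
    static void Main()
    {
        var rayleigh = new Rayleigh(2.0);                       // σ = 2

        double pdf = rayleigh.Density(2.0);                     // (x/σ²)·exp(-x²/(2σ²))
        double cdf = rayleigh.CumulativeDistribution(2.0);      // 1 - exp(-x²/(2σ²)) ≈ 0.3935
        double median = rayleigh.Median;                        // σ·√(2·ln 2)

        var rng = new Random(7);
        double speed = rayleigh.Sample();                       // e.g. a simulated wind speed
        double another = Rayleigh.Sample(rng, 2.0);             // static helper documented above
        Console.WriteLine($"pdf={pdf:F4}, cdf={cdf:F4}, median={median:F4}, draws={speed:F3}/{another:F3}");
    }
}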
+ The scale (σ) of the distribution. Range: σ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Stable distribution. + A random variable is said to be stable (or to have a stable distribution) if it has + the property that a linear combination of two independent copies of the variable has + the same distribution, up to location and scale parameters. + For details about this distribution, see + Wikipedia - Stable distribution. + + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Gets the stability (α) of the distribution. Range: 2 ≥ α > 0. + + + + + Gets The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + + + + + Gets the scale (c) of the distribution. Range: c > 0. + + + + + Gets the location (μ) of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets he entropy of the distribution. + + Always throws a not supported exception. + + + + Gets the skewness of the distribution. + + Throws a not supported exception of Alpha != 2. + + + + Gets the mode of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the median of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. 
+ + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + Throws a not supported exception if Alpha != 2, (Alpha != 1 and Beta !=0), or (Alpha != 0.5 and Beta != 1) + + + + Samples the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a random number from the distribution. + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Stable distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. 
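Since the Stable CDF summary above notes that it throws for all but a few special parameter combinations (and the density has no general closed form either), a sketch that sticks to sampling, which is available for the whole parameter range:

using System;
using System.Linq;
using MathNet.Numerics.Distributions;

class StableSketch
{
    static void Main()
    {
        // α (stability) in (0,2], β (skewness) in [-1,1], c (scale) > 0, μ (location).
        var rng = new Random(123);
        var stable = new Stable(1.5, 0.0, 1.0, 0.0, rng);

        // Work with samples rather than Density/CumulativeDistribution for general α, β.
        double[] draws = stable.Samples().Take(10000).ToArray();
        double empiricalMedian = draws.OrderBy(v => v).ElementAt(draws.Length / 2);

        // Static sampler with explicit parameters.
        double one = Stable.Sample(rng, 1.5, 0.0, 1.0, 0.0);

        Console.WriteLine($"empirical median ≈ {empiricalMedian:F3}, single draw = {one:F3}");
    }
}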
+ + + + Generates a sample from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Continuous Univariate Student's T-distribution. + Implements the univariate Student t-distribution. For details about this + distribution, see + + Wikipedia - Student's t-distribution. + + We use a slightly generalized version (compared to + Wikipedia) of the Student t-distribution. Namely, one which also + parameterizes the location and scale. See the book "Bayesian Data + Analysis" by Gelman et al. for more details. + The density of the Student t-distribution p(x|mu,scale,dof) = + Gamma((dof+1)/2) (1 + (x - mu)^2 / (scale * scale * dof))^(-(dof+1)/2) / + (Gamma(dof/2)*Sqrt(dof*pi*scale)). + The distribution will use the by + default. Users can get/set the random number generator by using the + property. + The statistics classes will check all the incoming parameters + whether they are in the allowed range. This might involve heavy + computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the StudentT class. This is a Student t-distribution with location 0.0 + scale 1.0 and degrees of freedom 1. + + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Gets the location (μ) of the Student t-distribution. + + + + + Gets the scale (σ) of the Student t-distribution. Range: σ > 0. + + + + + Gets the degrees of freedom (ν) of the Student t-distribution. Range: ν > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Student t-distribution. + + + + + Gets the variance of the Student t-distribution. 
+ + + + + Gets the standard deviation of the Student t-distribution. + + + + + Gets the entropy of the Student t-distribution. + + + + + Gets the skewness of the Student t-distribution. + + + + + Gets the mode of the Student t-distribution. + + + + + Gets the median of the Student t-distribution. + + + + + Gets the minimum of the Student t-distribution. + + + + + Gets the maximum of the Student t-distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Samples student-t distributed random variables. + + The algorithm is method 2 in section 5, chapter 9 + in L. Devroye's "Non-Uniform Random Variate Generation" + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a random number from the standard student-t distribution. + + + + Generates a sample from the Student t-distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Student t-distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the inverse cumulative density at . 
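A minimal sketch for the Student t members above (location μ, scale σ, degrees of freedom ν); note the warning carried in the InvCDF summary that the inverse CDF is currently not an explicit implementation, hence slow. The instance method name InverseCumulativeDistribution and the static parameter order are assumptions:

using System;
using MathNet.Numerics.Distributions;

class StudentTSketch
{
    static void Main()
    {
        // Location 0, scale 1, ν = 5 degrees of freedom.
        var t = new StudentT(0.0, 1.0, 5.0);

        double pdf = t.Density(0.0);
        double cdf = t.CumulativeDistribution(2.0);             // P(T ≤ 2)
        double crit = t.InverseCumulativeDistribution(0.975);   // ≈ 2.571 (slow, per the warning above)

        // Static form (assumed order: location, scale, freedom, then x).
        double cdf2 = StudentT.CDF(0.0, 1.0, 5.0, 2.0);

        var rng = new Random(3);
        double draw = StudentT.Sample(rng, 0.0, 1.0, 5.0);
        Console.WriteLine($"pdf={pdf:F4}, cdf={cdf:F4}/{cdf2:F4}, t_crit={crit:F3}, draw={draw:F3}");
    }
}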
+ + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Student t-distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Student t-distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Triangular distribution. + For details, see Wikipedia - Triangular distribution. + + The distribution will use the by default. + Users can get/set the random number generator by using the property. + The statistics classes will check whether all the incoming parameters are in the allowed range. This might involve heavy computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The random number generator which is used to draw random samples. + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + + + + Gets the lower bound of the distribution. 
+ + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. 
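A sketch of the Triangular members above, respecting lower ≤ mode ≤ upper as the parameter checks require; the instance inverse-CDF method name is an assumption:

using System;
using MathNet.Numerics.Distributions;

class TriangularSketch
{
    static void Main()
    {
        // Lower bound 0, upper bound 10, mode (most frequent value) 4.
        var tri = new Triangular(0.0, 10.0, 4.0);

        double pdf = tri.Density(4.0);                          // peaks at the mode: 2/(upper - lower) = 0.2
        double cdf = tri.CumulativeDistribution(4.0);           // (mode - lower)/(upper - lower) = 0.4
        double q50 = tri.InverseCumulativeDistribution(0.5);    // method name assumed

        var rng = new Random(11);
        double draw = Triangular.Sample(rng, 0.0, 10.0, 4.0);   // static form (lower, upper, mode)
        Console.WriteLine($"pdf={pdf:F3}, cdf={cdf:F3}, median={q50:F3}, draw={draw:F3}");
    }
}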
+ + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. 
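Before the Wishart members that follow, a sketch of the Weibull API documented just above (shape k, scale λ), including the parameter estimator attributed to Qiao & Tsokos; the method name Estimate is an assumption here:

using System;
using System.Linq;
using MathNet.Numerics.Distributions;

class WeibullSketch
{
    static void Main()
    {
        var weibull = new Weibull(1.5, 2.0);                    // shape k = 1.5, scale λ = 2

        double cdf = weibull.CumulativeDistribution(2.0);       // 1 - exp(-(x/λ)^k) = 1 - e⁻¹ ≈ 0.632
        double mean = weibull.Mean;                             // λ·Γ(1 + 1/k)

        var rng = new Random(5);
        double[] data = Weibull.Samples(rng, 1.5, 2.0).Take(500).ToArray();

        // Parameter estimation per Qiao & Tsokos (1994), as referenced above (name assumed).
        Weibull fitted = Weibull.Estimate(data);
        Console.WriteLine($"cdf={cdf:F3}, mean={mean:F3}, fitted k≈{fitted.Shape:F2}, λ≈{fitted.Scale:F2}");
    }
}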
+ + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. + + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. 
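The Wishart sampler above (Smith & Hocking, Algorithm AS 53) operates on Math.NET matrices; a minimal sketch:

using System;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

class WishartSketch
{
    static void Main()
    {
        // Degrees of freedom n = 5, scale matrix V = 2×2 identity.
        Matrix<double> scale = Matrix<double>.Build.DenseIdentity(2);
        var wishart = new Wishart(5.0, scale);

        Matrix<double> mean = wishart.Mean;                     // n·V
        Matrix<double> draw = wishart.Sample();                 // one matrix sample via AS 53

        double density = wishart.Density(draw);                 // argument must match the scale matrix dimensions
        Console.WriteLine(mean.ToString());
        Console.WriteLine($"density at the sampled matrix: {density:G4}");
    }
}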
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. 
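A sketch for the Zipf members above (exponent s, number of elements n), using the instance methods so the static parameter order does not have to be assumed:

using System;
using MathNet.Numerics.Distributions;

class ZipfSketch
{
    static void Main()
    {
        // s = 1.1, n = 100 ranked elements.
        var zipf = new Zipf(1.1, 100);

        double p1 = zipf.Probability(1);              // most frequent rank
        double p10 = zipf.Probability(10);
        double cdf = zipf.CumulativeDistribution(10); // P(X ≤ 10)

        int draw = zipf.Sample();
        int[] ranks = new int[1000];
        zipf.Samples(ranks);                          // fill-array overload documented above

        Console.WriteLine($"P(1)={p1:F4}, P(10)={p10:F4}, CDF(10)={cdf:F4}, draw={draw}");
    }
}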
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. 
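The integer number-theory helpers just listed are static methods (grouped on the Euclid class in Math.NET Numerics 3.x; the CeilingToPowerOfTwo name below is an assumption). A sketch:

using System;
using MathNet.Numerics;

class EuclidSketch
{
    static void Main()
    {
        // Canonical modulus keeps the sign of the divisor; remainder keeps the sign of the dividend.
        Console.WriteLine(Euclid.Modulus(-5, 3));      // 1
        Console.WriteLine(Euclid.Remainder(-5, 3));    // -2

        Console.WriteLine(Euclid.IsPowerOfTwo(64));        // True
        Console.WriteLine(Euclid.IsPerfectSquare(49));     // True
        Console.WriteLine(Euclid.CeilingToPowerOfTwo(100)); // 128, closest power of two ≥ 100 (name assumed)

        // Euclid's algorithm for gcd/lcm.
        Console.WriteLine(Euclid.GreatestCommonDivisor(45, 18)); // 9
        Console.WriteLine(Euclid.LeastCommonMultiple(4, 6));     // 12
    }
}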
+ Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend to use them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occured calling native provider function. + + + + + An error occured calling native provider function. + + + + + Native provider was unable to allocate sufficent memory. + + + + + Native provider failed LU inversion do to a singular U matrix. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return 0) + and then dividing the total by the number of gain periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. (The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). 
+ + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. 
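For the root-finding entries above, a brief sketch; the class and method names (FindRoots.OfFunction, FindRoots.Quadratic) are the current Math.NET ones, and the quadratic helper takes its coefficients ascending by exponent as the summary notes:

using System;
using MathNet.Numerics;

class RootsSketch
{
    static void Main()
    {
        // f(x) = x² - 2 with the root bracketed in [0, 2]; accuracy and iteration limits use their defaults.
        double root = FindRoots.OfFunction(x => x * x - 2.0, 0.0, 2.0);
        Console.WriteLine(root);                                // ≈ 1.4142135...

        // c + b·x + a·x² = 0, coefficients ascending by exponent: -2 + 0·x + 1·x².
        var quad = FindRoots.Quadratic(-2.0, 0.0, 1.0);
        Console.WriteLine($"{quad.Item1} and {quad.Item2}");    // ±√2 as complex numbers
    }
}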
+ A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. 
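+ Before the sample generators that follow, a hedged usage sketch of the least-squares Fit helpers
+ summarized earlier in this block. The Fit.Line, Fit.Polynomial and Evaluate.Polynomial calls are
+ assumed to match the signatures shipped with this MathNet.Numerics package; verify against the
+ assembly before relying on them.
+
+ using System;
+ using MathNet.Numerics;
+
+ double[] x = { 1.0, 2.0, 3.0, 4.0 };
+ double[] y = { 3.1, 4.9, 7.2, 8.8 };
+
+ // Straight line y = a + b*x; Fit.Line returns (intercept, slope).
+ Tuple<double, double> line = Fit.Line(x, y);
+ Console.WriteLine("a = {0}, b = {1}", line.Item1, line.Item2);
+
+ // Second-order polynomial p0 + p1*x + p2*x^2, coefficients ascending by exponent and therefore
+ // compatible with Evaluate.Polynomial, as the summary above notes.
+ double[] p = Fit.Polynomial(x, y, 2);
+ Console.WriteLine(Evaluate.Polynomial(2.5, p));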
+ + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. 
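+ A short sketch of the equidistant generators (linearly spaced, logarithmically spaced and
+ colon-style ranges) described earlier in this block, with the argument order assumed from the
+ summaries (length or start first); the expected outputs are shown as comments.
+
+ using MathNet.Numerics;
+
+ // Five samples from 0 to 1 inclusive, like MATLAB linspace(0, 1, 5) with the length first.
+ double[] lin = Generate.LinearSpaced(5, 0.0, 1.0);     // 0, 0.25, 0.5, 0.75, 1
+
+ // Four samples from 10^0 to 10^3 inclusive, like MATLAB logspace(0, 3, 4) with the length first.
+ double[] log = Generate.LogSpaced(4, 0.0, 3.0);        // 1, 10, 100, 1000
+
+ // Colon-style range 0:0.5:2; the stop value is included because 2 - 0 is a multiple of 0.5.
+ double[] range = Generate.LinearRange(0.0, 0.5, 2.0);  // 0, 0.5, 1, 1.5, 2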
+ + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. 
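+ A hedged sketch of the waveform generators documented above, passing arguments in the order the
+ parameter lists describe (length, rates or durations, amplitudes, then the optional offsets).
+ The exact overloads are an assumption and should be checked against the shipped assembly.
+
+ using MathNet.Numerics;
+
+ // 64 samples of a 2 Hz sine sampled at 32 Hz, peak amplitude 1.0, zero mean (DC part).
+ double[] sine = Generate.Sinusoidal(64, 32.0, 2.0, 1.0, 0.0);
+
+ // Periodic square wave: 8 samples at the high value (+1.0), then 8 samples at the low value (-1.0).
+ double[] square = Generate.Square(64, 8, 8, -1.0, 1.0);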
+ + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. 
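+ A brief sketch of drawing the random samples described above through a distribution object.
+ The Normal class and its Sample/Samples members are assumed from the
+ MathNet.Numerics.Distributions namespace packaged with this library.
+
+ using MathNet.Numerics.Distributions;
+
+ var gaussian = new Normal(0.0, 1.0);   // mean 0, standard deviation 1
+ double one = gaussian.Sample();        // a single independent draw
+ double[] many = new double[1000];
+ gaussian.Samples(many);                // fill an array with independent draws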
+ + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + Culture Info. + The parsed double number using the given culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + Culture Info. + The parsed float number using the given culture information. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. 
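+ A minimal in-place forward FFT call matching the summaries above, assuming the
+ MathNet.Numerics.IntegralTransforms.Fourier class operating on System.Numerics.Complex samples.
+
+ using System;
+ using System.Numerics;
+ using MathNet.Numerics.IntegralTransforms;
+
+ // 256 time-domain samples of a single cosine, transformed in place to the frequency domain.
+ var samples = new Complex[256];
+ for (int i = 0; i < samples.Length; i++)
+ {
+     samples[i] = new Complex(Math.Cos(2 * Math.PI * 8 * i / samples.Length), 0.0);
+ }
+ Fourier.Forward(samples, FourierOptions.Default);
+ Console.WriteLine(samples[8].Magnitude);   // the energy concentrates at bin 8 (and its mirror bin)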
+ + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. 
+ Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). 
[= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Approximation of the finite integral in the given interval. 
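+ A hedged example of the adaptive quadrature entry point summarized above, assuming an
+ Integrate.OnClosedInterval method that follows the documented parameter order
+ (function, interval start, interval stop, target accuracy).
+
+ using System;
+ using MathNet.Numerics;
+
+ // Integrate sin(x) over [0, pi]; the exact value is 2.
+ double area = Integrate.OnClosedInterval(x => Math.Sin(x), 0.0, Math.PI, 1e-8);
+ Console.WriteLine(area);   // approximately 2.0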
+ + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. + + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. + + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. 
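+ A short sketch of the Gauss-Legendre rule described above in both its one- and two-dimensional
+ static forms; the GaussLegendreRule.Integrate overloads and their argument order are assumed to
+ match the parameter lists given in these summaries, and the orders are taken from the precomputed
+ set they mention.
+
+ using MathNet.Numerics.Integration;
+
+ // 1D: integrate x^2 over [0, 1] with a 32nd-order rule; the exact value is 1/3.
+ double oneDim = GaussLegendreRule.Integrate(x => x * x, 0.0, 1.0, 32);
+
+ // 2D: integrate x*y over the rectangle [0, 1] x [0, 2]; the exact value is 1.
+ double twoDim = GaussLegendreRule.Integrate((x, y) => x * y, 0.0, 1.0, 0.0, 2.0, 32);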
+ + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. 
+ Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. + First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. 
+ Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. 
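+ A usage sketch of two of the interpolation schemes listed above, using the sorted factory methods
+ that the remarks name directly (CubicSpline.InterpolateNaturalSorted and
+ LinearSpline.InterpolateSorted); the Interpolate/Differentiate/Integrate members are assumed to
+ behave as the per-algorithm summaries that follow describe.
+
+ using MathNet.Numerics.Interpolation;
+
+ double[] t = { 0.0, 1.0, 2.0, 3.0 };
+ double[] x = { 0.0, 1.0, 4.0, 9.0 };
+
+ // Natural cubic spline: zero second derivatives at the boundaries; supports both
+ // differentiation and integration.
+ CubicSpline spline = CubicSpline.InterpolateNaturalSorted(t, x);
+ double value = spline.Interpolate(1.5);
+ double slope = spline.Differentiate(1.5);
+ double area  = spline.Integrate(0.0, 3.0);
+
+ // Piecewise linear interpolation over the same samples for comparison.
+ IInterpolation linear = LinearSpline.InterpolateSorted(t, x);
+ double linearValue = linear.Interpolate(1.5);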
+ + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. + + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. 
+ + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. 
+ + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. + Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. 
+ + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. + + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. 
+ + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Left and right boundary conditions. + + + + + Natural Boundary (Zero second derivative). + + + + + Parabolically Terminated boundary. + + + + + Fixed first derivative at the boundary. + + + + + Fixed second derivative at the boundary. + + + + + A step function where the start of each segment is included, and the last segment is open-ended. + Segment i is [x_i, x_i+1) for i < N, or [x_i, infinity] for i = N. + The domain of the function is all real numbers, such that y = 0 where x <. + + Supports both differentiation and integration. + + + Sample points (N), sorted ascending + Samples values (N) of each segment starting at the corresponding sample point. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t. + + + + + Wraps an interpolation with a transformation of the interpolated values. + + Neither differentiation nor integration is supported. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. 
+ + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. 
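
As a hedged illustration of the dense matrix construction and norm members described above, the following C# sketch assumes the Math.NET Numerics 3.x builder API: Matrix<double>.Build.DenseOfArray copies its input, while Matrix<double>.Build.Dense(rows, columns, storage) binds directly to a column-major array.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class DenseMatrixSketch
    {
        static void Main()
        {
            var m = Matrix<double>.Build;

            // Copy construction from a 2D array (stored internally in column-major order).
            Matrix<double> a = m.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });

            // Direct binding to a raw column-major array: changes are shared both ways.
            var raw = new double[] { 1, 3, 2, 4 };          // columns (1,3) and (2,4)
            Matrix<double> bound = m.Dense(2, 2, raw);

            Console.WriteLine(a.L1Norm());          // maximum absolute column sum -> 6
            Console.WriteLine(a.InfinityNorm());    // maximum absolute row sum    -> 7
            Console.WriteLine(a.FrobeniusNorm());   // square root of the sum of squared values
            Console.WriteLine(a * bound);           // matrix product
        }
    }
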
+ + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. 
+ Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. 
+ This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
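
A short sketch of the dense vector members described here (copy construction versus direct binding, dot product, and the L1/L2/infinity norms), assuming the Math.NET Numerics 3.x names Vector<double>.Build.DenseOfArray and the DenseVector(double[]) constructor:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class DenseVectorSketch
    {
        static void Main()
        {
            // Copy construction: the vector gets its own storage.
            Vector<double> v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0, 12.0 });

            // Direct binding: the vector and the array share the same memory.
            var raw = new[] { 1.0, 2.0, 2.0 };
            var bound = new DenseVector(raw);

            Console.WriteLine(v.DotProduct(bound));  // 3*1 + (-4)*2 + 12*2 = 19
            Console.WriteLine(v.L1Norm());           // sum of absolute values -> 19
            Console.WriteLine(v.L2Norm());           // Euclidean norm -> 13
            Console.WriteLine(v.InfinityNorm());     // maximum absolute value -> 12
        }
    }
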
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use, + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a double dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. 
A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. 
+ The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. 
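
A small example of the diagonal matrix behaviour described above, assuming the Math.NET Numerics 3.x builder method Matrix<double>.Build.DiagonalOfDiagonalArray; Determinant, Trace, Inverse and Diagonal are the members documented in this section.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class DiagonalMatrixSketch
    {
        static void Main()
        {
            // Diagonal storage: only the diagonal entries are kept in memory.
            Matrix<double> d = Matrix<double>.Build.DiagonalOfDiagonalArray(new double[] { 2.0, 4.0, 8.0 });

            Console.WriteLine(d.Determinant());   // product of the diagonal entries -> 64
            Console.WriteLine(d.Trace());         // sum of the diagonal entries -> 14
            Console.WriteLine(d.Inverse());       // diagonal matrix with 0.5, 0.25, 0.125
            Console.WriteLine(d.Diagonal());      // the diagonal returned as a vector

            // Setting an off-diagonal entry to a non-zero value throws, as documented above:
            // d[0, 1] = 1.0;
        }
    }
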
+ + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. 
This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. 
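
A minimal sketch of solving a symmetric positive definite system with the Cholesky factorization described above, assuming the Math.NET Numerics 3.x members Matrix<double>.Cholesky(), Cholesky<double>.Solve and Cholesky<double>.Determinant:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class CholeskySketch
    {
        static void Main()
        {
            // A symmetric, positive definite system A x = b.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1 },
                { 1, 3 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

            var cholesky = a.Cholesky();           // throws if A is not symmetric positive definite
            Vector<double> x = cholesky.Solve(b);  // solves A x = b using the L L' factors

            Console.WriteLine(cholesky.Determinant);   // det(A) = 11
            Console.WriteLine(x);                      // approximately (0.0909, 0.6364)
        }
    }
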
+ + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. 
+ Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + Matrix V is encoded in the property EigenVectors in the way that: + - column corresponding to real eigenvalue represents real eigenvector, + - columns corresponding to the pair of complex conjugate eigenvalues + lambda[i] and lambda[i+1] encode real and imaginary parts of eigenvectors. + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. 
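
The LU and QR factorizations documented here are typically used through their Solve members; below is a hedged C# sketch, assuming the Math.NET Numerics 3.x members Matrix<double>.LU(), Matrix<double>.QR(), Solve and Determinant.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class LuQrSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 3, 1 },
                { 1, 2 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 9.0, 8.0 });

            // LU with partial pivoting: P*A = L*U.
            var lu = a.LU();
            Console.WriteLine(lu.Solve(b));        // x = (2, 3)
            Console.WriteLine(lu.Determinant);     // det(A) = 5

            // QR by Householder reflections: A = Q*R.
            var qr = a.QR();
            Console.WriteLine(qr.Solve(b));        // same solution for this square system
        }
    }
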
+ + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. 
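
A small sketch of the eigenvalue decomposition described above for the symmetric case (A = V*D*V'), assuming the Math.NET Numerics 3.x members Matrix<double>.Evd(), EigenValues, EigenVectors and D:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class EvdSketch
    {
        static void Main()
        {
            // Symmetric matrix: real eigenvalues and an orthogonal eigenvector matrix.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 2, 1 },
                { 1, 2 }
            });

            var evd = a.Evd();
            Console.WriteLine(evd.EigenValues);    // complex vector; here 1 and 3 with zero imaginary parts
            Console.WriteLine(evd.D);              // block-diagonal eigenvalue matrix D
            Console.WriteLine(evd.EigenVectors);   // columns are the eigenvectors V

            // For the symmetric case, V * D * V^T reconstructs A up to rounding error.
            Console.WriteLine(evd.EigenVectors * evd.D * evd.EigenVectors.Transpose());
        }
    }
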
+ If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. 
+ If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
+ + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + double version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . 
+ If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. 
+ The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
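The remarks above refer to example code that is not present in this documentation fragment. As a stand-in, here is a minimal sketch of one way the BiCGStab solver might be driven; the type names (`BiCgStab`, `Iterator<double>`, `IterationCountStopCriterion<double>`, `ResidualStopCriterion<double>`, `DiagonalPreconditioner`, which is documented further down in this file) are assumptions based on MathNet.Numerics 3.x and do not appear verbatim in this fragment.

```csharp
// Minimal sketch (assumed MathNet.Numerics 3.x API): solve a small
// non-symmetric system Ax = b with the BiCGStab solver.
using System;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class BiCgStabSketch
{
    static void Main()
    {
        var a = DenseMatrix.OfArray(new double[,]
        {
            { 4.0, 1.0, 0.0 },
            { 2.0, 5.0, 1.0 },
            { 0.0, 1.0, 3.0 },
        });
        var b = new DenseVector(new[] { 1.0, 2.0, 3.0 });
        var x = new DenseVector(3); // result vector, overwritten by Solve

        // Stop after 1000 iterations or once the residual drops below 1e-10.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        // As noted above, the choice of preconditioner matters; a simple
        // diagonal preconditioner is used here.
        new BiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());

        Console.WriteLine(x);
        Console.WriteLine((a * x - b).L2Norm()); // norm of the residual Ax - b
    }
}
```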
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
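The example referred to above is likewise missing from this fragment; the sketch below shows the same `Solve` call shape for the GPBiCG solver on a sparse, non-symmetric system. The class name `GpBiCg` is assumed from MathNet.Numerics 3.x, and the BiCGStab/GPBiCG switching thresholds are left at their defaults (they are settable through the step-count properties documented above).

```csharp
// Minimal sketch (assumed API): GPBiCG on a sparse, non-symmetric system.
using System;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class GpBiCgSketch
{
    static void Main()
    {
        const int n = 100;
        var a = new SparseMatrix(n, n);
        var b = new DenseVector(n);
        for (var i = 0; i < n; i++)
        {
            a[i, i] = 4.0;                     // dominant diagonal
            if (i > 0) a[i, i - 1] = -1.0;     // asymmetric off-diagonals
            if (i < n - 1) a[i, i + 1] = -2.0;
            b[i] = 1.0;
        }
        var x = new DenseVector(n);

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-8));

        new GpBiCg().Solve(a, b, x, iterator, new DiagonalPreconditioner());

        Console.WriteLine((a * x - b).L2Norm()); // should be around 1e-8 or smaller
    }
}
```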
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
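To make the workflow concrete, here is a hedged sketch of plugging the ILU(0) preconditioner described above into one of the iterative solvers from this file. The class name `ILU0Preconditioner` is an assumption based on MathNet.Numerics 3.x; the `Initialize` call and the solver's `Solve` signature follow the descriptions in this documentation.

```csharp
// Minimal sketch (assumed API): ILU(0) preconditioning for BiCGStab.
// ILU0Preconditioner is an assumed class name.
using System;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class Ilu0Sketch
{
    static void Main()
    {
        const int n = 50;
        var a = new SparseMatrix(n, n);
        var b = new DenseVector(n);
        for (var i = 0; i < n; i++)
        {
            a[i, i] = 10.0;
            if (i > 0) a[i, i - 1] = -1.0;
            if (i < n - 1) a[i, i + 1] = -1.0;
            b[i] = i + 1.0;
        }
        var x = new DenseVector(n);

        // Build the combined L/U factors from the coefficient matrix.
        var preconditioner = new ILU0Preconditioner();
        preconditioner.Initialize(a);

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-10));

        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        Console.WriteLine((a * x - b).L2Norm());
    }
}
```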
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
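A hedged configuration sketch for the ILUTP preconditioner described above. The class name `ILUTPPreconditioner` is assumed from MathNet.Numerics 3.x; the `FillLevel`, `DropTolerance` and `PivotTolerance` property names are taken from the remarks above, which also note that changing them after initialization requires re-initializing the preconditioner.

```csharp
// Minimal sketch (assumed API): ILUTP with explicit fill/drop/pivot settings.
// ILUTPPreconditioner is an assumed class name.
using System;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class IlutpSketch
{
    static void Main()
    {
        const int n = 50;
        var a = new SparseMatrix(n, n);
        var b = new DenseVector(n);
        for (var i = 0; i < n; i++)
        {
            a[i, i] = 5.0;
            if (i > 0) a[i, i - 1] = -1.0;
            if (i < n - 1) a[i, i + 1] = -1.5;
            b[i] = 1.0;
        }
        var x = new DenseVector(n);

        // Settings are applied before the preconditioner is used, since later
        // changes would invalidate the factorization (see remarks above).
        var preconditioner = new ILUTPPreconditioner
        {
            FillLevel = 10.0,      // allow up to 10x the original non-zero count
            DropTolerance = 1e-4,  // drop entries below this absolute value
            PivotTolerance = 0.0,  // 0.0 disables pivoting
        };

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-10));

        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        Console.WriteLine((a * x - b).L2Norm());
    }
}
```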
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
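The example referred to above is absent here as well; the sketch below shows the ML(k)-BiCGStab solver with an explicit number of starting vectors. The class name `MlkBiCgStab` and the `NumberOfStartingVectors` property name are assumptions based on MathNet.Numerics 3.x (the documentation above only states that the value must be larger than 1 and smaller than the number of variables).

```csharp
// Minimal sketch (assumed API): ML(k)-BiCGStab with k = 4 starting vectors.
using System;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class MlkBiCgStabSketch
{
    static void Main()
    {
        const int n = 60;
        var a = new SparseMatrix(n, n);
        var b = new DenseVector(n);
        for (var i = 0; i < n; i++)
        {
            a[i, i] = 6.0;
            if (i > 0) a[i, i - 1] = -2.0;
            if (i < n - 1) a[i, i + 1] = -1.0;
            b[i] = 1.0;
        }
        var x = new DenseVector(n);

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-8));

        // 1 < k < n, as required by the starting-vector property documented above.
        var solver = new MlkBiCgStab { NumberOfStartingVectors = 4 };
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        Console.WriteLine((a * x - b).L2Norm());
    }
}
```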
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
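As with the other solvers, the example mentioned above is not included in this fragment; a minimal sketch follows, assuming the MathNet.Numerics 3.x class name `TFQMR` and the same `Solve(matrix, input, result, iterator, preconditioner)` shape documented above.

```csharp
// Minimal sketch (assumed API): TFQMR on a small sparse system.
using System;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class TfqmrSketch
{
    static void Main()
    {
        const int n = 40;
        var a = new SparseMatrix(n, n);
        var b = new DenseVector(n);
        for (var i = 0; i < n; i++)
        {
            a[i, i] = 4.0;
            if (i > 0) a[i, i - 1] = -1.0;
            if (i < n - 1) a[i, i + 1] = -1.0;
            b[i] = 2.0;
        }
        var x = new DenseVector(n);

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        Console.WriteLine((a * x - b).L2Norm());
    }
}
```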
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
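Since this stretch of the documentation describes the CSR-backed sparse matrix together with its triangle and norm members, a small usage sketch may help; the member names used here (`NonZerosCount`, `LowerTriangle`, `FrobeniusNorm`) are assumed to match MathNet.Numerics 3.x.

```csharp
// Minimal sketch (assumed API): a sparse matrix only stores the cells that are set.
using System;
using MathNet.Numerics.LinearAlgebra.Double;

static class SparseMatrixSketch
{
    static void Main()
    {
        var m = new SparseMatrix(1000, 1000); // all cells start at zero
        m[0, 0] = 2.0;
        m[1, 1] = 3.0;
        m[999, 0] = -1.0;
        m[499, 500] = 4.0;

        Console.WriteLine(m.NonZerosCount);     // 4: only the explicitly set cells
        Console.WriteLine(m.FrobeniusNorm());   // sqrt(4 + 9 + 1 + 16)

        var lower = m.LowerTriangle();          // keeps the diagonal and below
        Console.WriteLine(lower.NonZerosCount); // 3: (499,500) lies above the diagonal
    }
}
```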
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
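A corresponding sketch for the sparse vector type described above; the member names (`NonZerosCount`, `DotProduct`, `L2Norm`) are assumed from MathNet.Numerics 3.x. It also echoes the caveat above about adding a non-zero scalar to a sparse vector.

```csharp
// Minimal sketch (assumed API): sparse vector storage and a dot product.
using System;
using MathNet.Numerics.LinearAlgebra.Double;

static class SparseVectorSketch
{
    static void Main()
    {
        var v = new SparseVector(100000); // length 100000, nothing stored yet
        v[10] = 3.0;
        v[5000] = -4.0;

        Console.WriteLine(v.NonZerosCount); // 2
        Console.WriteLine(v.L2Norm());      // 5 = sqrt(9 + 16)

        var w = new SparseVector(100000);
        w[10] = 2.0;
        Console.WriteLine(v.DotProduct(w)); // 6

        // Per the note above, adding a non-zero scalar to every element would
        // produce a fully populated "sparse" vector; prefer dense storage there.
    }
}
```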
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + double version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
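A short sketch of the dense, column-major matrix described above, exercising a few of the members documented in this section (`Transpose`, `Trace`, `FrobeniusNorm`; names assumed from MathNet.Numerics 3.x).

```csharp
// Minimal sketch (assumed API): dense matrix creation and basic operations.
using System;
using MathNet.Numerics.LinearAlgebra.Double;

static class DenseMatrixSketch
{
    static void Main()
    {
        var a = DenseMatrix.OfArray(new double[,]
        {
            { 1.0, 2.0 },
            { 3.0, 4.0 },
        });
        var b = DenseMatrix.OfArray(new double[,]
        {
            { 0.0, 1.0 },
            { 1.0, 0.0 },
        });

        Console.WriteLine(a * b);              // matrix-matrix multiplication
        Console.WriteLine(a.Transpose());      // swap rows and columns
        Console.WriteLine(a.Trace());          // 1 + 4 = 5
        Console.WriteLine(a.FrobeniusNorm());  // sqrt(1 + 4 + 9 + 16)
    }
}
```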
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. 
+ All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
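The constructors above distinguish between binding to a raw array and copying it. A small sketch of that difference, again using the double-precision types for brevity:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class DenseVectorConstructionSketch
    {
        static void Main()
        {
            var raw = new[] { 1.0, 2.0, 3.0 };

            // Binds directly to the raw array: no copy, so edits are visible both ways.
            var bound = new DenseVector(raw);
            raw[0] = 10.0;
            Console.WriteLine(bound[0]); // 10

            // Copies the array: the new vector is independent of it.
            var copied = DenseVector.OfArray(raw);
            raw[1] = 20.0;
            Console.WriteLine(copied[1]); // still 2

            // Builder equivalents: zero-initialized, constant and init-function forms.
            var zeros = Vector<double>.Build.Dense(3);
            var ones = Vector<double>.Build.Dense(3, 1.0);
            var squares = Vector<double>.Build.Dense(3, i => (double)(i * i));
            Console.WriteLine(zeros + ones + squares);
        }
    }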
+ + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a float dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. 
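The norm, dot-product and index members above map onto the following calls; a minimal sketch using the double-precision build:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class VectorNormSketch
    {
        static void Main()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0, 0.0 });
            var w = Vector<double>.Build.DenseOfArray(new[] { 1.0, 1.0, 1.0 });

            Console.WriteLine(v.L1Norm());       // 7, sum of absolute values (Manhattan norm)
            Console.WriteLine(v.L2Norm());       // 5, Euclidean norm
            Console.WriteLine(v.InfinityNorm()); // 4, maximum absolute value
            Console.WriteLine(v.Norm(3.0));      // general p-norm, here p = 3

            Console.WriteLine(v.DotProduct(w));          // -1, same result as v * w
            Console.WriteLine(v.AbsoluteMaximumIndex()); // 1, index of the -4 entry
            Console.WriteLine(v.Sum());                  // -1
        }
    }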
+ + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. 
+ + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. 
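As described above, diagonal storage keeps only the diagonal, derives determinant and inverse from it, and rejects writes that would introduce non-zero off-diagonal values. A sketch; the DiagonalOfDiagonalArray builder name is taken from the MathNet 3.x API and should be treated as an assumption:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class DiagonalMatrixSketch
    {
        static void Main()
        {
            // Diagonal storage keeps only Min(rows, columns) values.
            var d = Matrix<double>.Build.DiagonalOfDiagonalArray(new[] { 2.0, 3.0, 4.0 });

            Console.WriteLine(d.Determinant()); // 24, the product of the diagonal
            Console.WriteLine(d.Diagonal());    // the diagonal returned as a vector
            Console.WriteLine(d.Inverse());     // diagonal matrix of reciprocals

            d[1, 1] = 5.0; // writing on the diagonal is fine
            d[0, 1] = 0.0; // writing zero off the diagonal is a no-op
            try
            {
                d[0, 1] = 1.0; // a non-zero off-diagonal write is rejected by the storage
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
    }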
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. 
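A short sketch of the Cholesky workflow described above, factoring a symmetric positive definite matrix once and reusing the cached factor for solves:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class CholeskySketch
    {
        static void Main()
        {
            // A symmetric, positive definite matrix; the factorization is computed
            // when Cholesky() is called and cached on the returned object.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0 },
                { 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

            var cholesky = a.Cholesky();
            var l = cholesky.Factor;   // lower triangular L with A = L * L'
            var x = cholesky.Solve(b); // solves A x = b using the cached factor

            Console.WriteLine(l * l.Transpose());    // reproduces A
            Console.WriteLine(cholesky.Determinant); // determinant from the factor
            Console.WriteLine((a * x - b).L2Norm()); // residual, close to zero
        }
    }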
+ + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. 
+ + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. 
+ If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. 
+ + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
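The factorization classes documented above (LU, QR, SVD, EVD and the Gram-Schmidt QR variant) are normally reached through the corresponding Matrix methods; a sketch under that assumption:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class FactorizationSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 1.0, 3.0, 1.0 },
                { 0.0, 1.0, 2.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 0.0, 1.0 });

            var lu = a.LU();                    // LU with partial pivoting
            Console.WriteLine(lu.Determinant);
            Console.WriteLine(lu.Solve(b));

            var qr = a.QR();                    // Householder QR: A = Q * R
            Console.WriteLine(qr.Q * qr.R);

            Console.WriteLine(a.GramSchmidt().Solve(b)); // modified Gram-Schmidt QR, same Solve contract

            var svd = a.Svd();                  // A = U * S * VT
            Console.WriteLine(svd.Rank);
            Console.WriteLine(svd.ConditionNumber);

            var evd = a.Evd();                  // eigenvalues and eigenvectors
            Console.WriteLine(evd.EigenValues);
        }
    }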
+ + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. 
+ The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. 
+ The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the
+ proper preconditioner.
+
+
+ The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks
+ for iterative methods
+
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel,
+ June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo,
+ Charles Romine and Henk van der Vorst
+
+ Url: http://www.netlib.org/templates/Templates.html
+
+ Algorithm is described in Chapter 2, section 2.3.8, page 27
+
+
+ The example code below provides an indication of the possible use of the
+ solver.
+
+
+
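The remarks refer to example code that lived in the original <example> element and is not carried in this diff. A minimal sketch of driving the solver with an iterator, stop criteria and a preconditioner; the BiCgStab, Iterator, IterationCountStopCriterion, ResidualStopCriterion and DiagonalPreconditioner names are assumptions drawn from the MathNet 3.x API, only the Solve(matrix, input, result, iterator, preconditioner) shape comes from the documentation itself:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class BiCgStabSketch
    {
        static void Main()
        {
            // A small non-symmetric system Ax = b.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3); // result vector, overwritten by Solve

            // Stop after 1000 iterations or once the residual is small enough.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            var solver = new BiCgStab();
            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

            Console.WriteLine(x);
            Console.WriteLine((a * x - b).L2Norm()); // residual, close to zero
        }
    }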
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+
+ Note that if an iterator is passed to this solver it will be used for all the sub-solvers.
+
+
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the
+ proper preconditioner.
+
+
+ The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with
+ efficiency and robustness
+
+ S. Fujino
+
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117
+
+
+ The example code below provides an indication of the possible use of the
+ solver.
+
+
+
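As above, the referenced example code is not part of this diff. GpBiCg follows the same Solve contract as BiCgStab, so a sketch differs mainly in the solver instance (class names again assumed from the MathNet 3.x API):

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class GpBiCgSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 5.0, 2.0, 0.0 },
                { 1.0, 4.0, 1.0 },
                { 0.0, 2.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 1.0, 1.0 });
            var x = Vector<double>.Build.Dense(3);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            // Same Solve contract as BiCgStab; internally the method alternates
            // between BiCGStab-style and GPBiCG-style steps as described above.
            new GpBiCg().Solve(a, b, x, iterator, new DiagonalPreconditioner());

            Console.WriteLine(iterator.Status); // how the iteration terminated
            Console.WriteLine(x);
        }
    }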
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
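A sketch of feeding the ILU(0) preconditioner to an iterative solver on sparse storage; the ILU0Preconditioner class name is an assumption based on the MathNet 3.x API, the rest mirrors the earlier solver sketch:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class Ilu0Sketch
    {
        static void Main()
        {
            // Sparse storage is the natural fit for an incomplete LU preconditioner.
            var a = Matrix<double>.Build.SparseOfArray(new double[,]
            {
                {  4.0, -1.0,  0.0 },
                { -1.0,  4.0, -1.0 },
                {  0.0, -1.0,  4.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            // The solver initializes the preconditioner with A and then uses it
            // to approximate solutions of the preconditioning systems.
            new BiCgStab().Solve(a, b, x, iterator, new ILU0Preconditioner());

            Console.WriteLine(x);
        }
    }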
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner
+
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22
+
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
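The fill level, drop tolerance and pivot tolerance described above are supplied through the three-argument constructor; a sketch, with ILUTPPreconditioner as an assumed MathNet 3.x class name:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class IlutpSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.SparseOfArray(new double[,]
            {
                {  4.0, -1.0,  0.0 },
                { -1.0,  4.0, -1.0 },
                {  0.0, -1.0,  4.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 1.0, 1.0 });
            var x = Vector<double>.Build.Dense(3);

            // Constructor arguments follow the documentation above:
            // fill level (fraction of the original non-zero count that may be kept),
            // drop tolerance (entries below it are discarded) and pivot tolerance
            // (0.0 disables partial pivoting).
            var ilutp = new ILUTPPreconditioner(10.0, 1e-4, 0.0);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            new BiCgStab().Solve(a, b, x, iterator, ilutp);
            Console.WriteLine(x);
        }
    }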
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB Variant Based on Multiple Lanczos Starting Vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
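+ Since the code example referred to above is not reproduced in this file, here is a rough sketch of such a use. Type names (MlkBiCgStab, MILU0Preconditioner, Iterator&lt;double&gt;, IterationCountStopCriterion&lt;double&gt;, ResidualStopCriterion&lt;double&gt;) are assumed from Math.NET Numerics 3.x and may differ between versions; the Solve(matrix, input, result, iterator, preconditioner) shape follows the documentation below.
+
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfRowArrays(
+     new[] {  4.0, -1.0,  0.0 },
+     new[] { -1.0,  4.0, -1.0 },
+     new[] {  0.0, -1.0,  4.0 });
+ var b = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });
+ var x = new DenseVector(3);                      // result vector, filled in place
+
+ var monitor = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ var solver = new MlkBiCgStab();                  // default number of starting vectors
+ solver.Solve(A, b, x, monitor, new MILU0Preconditioner());
+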
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative Methods for Sparse Linear Systems. +
+ Yousef Saad +
+ The algorithm is described in Chapter 7, Section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
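+ A similar sketch for this solver, under the same assumptions as the ML(k)-BiCGStab example above (the TFQMR class name is assumed from Math.NET Numerics 3.x; the Solve signature follows the documentation below):
+
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfRowArrays(new[] { 4.0, 1.0 }, new[] { 1.0, 3.0 });
+ var b = DenseVector.OfArray(new[] { 1.0, 2.0 });
+ var x = new DenseVector(2);
+
+ var monitor = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ new TFQMR().Solve(A, b, x, monitor, new MILU0Preconditioner());
+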
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
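+ A short sketch of building the CSR-backed sparse matrix described above from indexed entries. The OfIndexed factory and the NonZerosCount property are assumed names from Math.NET Numerics 3.x; the "keys provided at most once, zero assumed if omitted" behaviour is stated in the documentation above.
+
+ using System;
+ using MathNet.Numerics.LinearAlgebra.Double;
+
+ var entries = new[]
+ {
+     Tuple.Create(0, 0,  4.0),
+     Tuple.Create(0, 1, -1.0),
+     Tuple.Create(1, 1,  4.0),
+     Tuple.Create(2, 2,  4.0)
+ };
+ var S = SparseMatrix.OfIndexed(3, 3, entries);   // cells not listed stay zero
+ Console.WriteLine(S.NonZerosCount);              // 4 stored non-zero entries
+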
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
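+ A brief sketch of the column-major dense storage described above. The DenseMatrix(rows, columns, storage) constructor and the Create(rows, columns, init) factory are assumed names from Math.NET Numerics 3.x; the column-by-column interpretation of the flat array is stated in the documentation above.
+
+ using MathNet.Numerics.LinearAlgebra.Double;
+
+ // Column-major: the first two values fill column 0, the next two column 1, and so on.
+ var m = new DenseMatrix(2, 3, new[] { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 });
+ // m = [ 1 3 5 ]
+ //     [ 2 4 6 ]
+
+ // The same layout produced by an init function instead of a raw array.
+ var n = DenseMatrix.Create(2, 3, (i, j) => 1.0 + i + 2 * j);
+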
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. 
+ The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. 
+ + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
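+ A small sketch contrasting the two dense-vector creation paths described above: binding directly to a raw array (shared storage) versus copying it. The DenseVector(double[]) constructor and the OfArray factory are assumed names from Math.NET Numerics 3.x; the shared-storage behaviour of the binding constructor is stated in the documentation above.
+
+ using System;
+ using MathNet.Numerics.LinearAlgebra.Double;
+
+ var data = new[] { 1.0, 2.0, 3.0 };
+ var bound = new DenseVector(data);     // binds to 'data'; changes affect both
+ var copy = DenseVector.OfArray(data);  // independent copy in a new memory block
+
+ data[0] = 99.0;
+ Console.WriteLine(bound[0]);           // 99 - the bound vector sees the change
+ Console.WriteLine(copy[0]);            // 1  - the copy does not
+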
+ + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. 
The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. 
+ + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the Frobenius norm of this matrix. + The Frobenius norm of this matrix. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. 
+ + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. 
+ If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. 
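The Cholesky, LU and QR factorizations documented in this stretch all follow the same pattern: the factorization is computed once at construction time, cached, and then reused for solving AX = B or Ax = b. A hedged sketch of that pattern through the Math.NET Numerics extension methods; Cholesky(), LU(), QR() and the Factor/Q/R/Determinant members are assumed from that library's public surface.

    using MathNet.Numerics.LinearAlgebra;

    static class DirectFactorizationSketch
    {
        static void Run()
        {
            // Symmetric positive definite system, so all three factorizations apply.
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 1.0, 0.0 },
                { 1.0, 3.0, 1.0 },
                { 0.0, 1.0, 2.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

            var chol = a.Cholesky();     // A = L*L', throws if A is not symmetric positive definite
            var xChol = chol.Solve(b);
            double det = chol.Determinant;

            var lu = a.LU();             // P*A = L*U with partial pivoting
            var xLu = lu.Solve(b);

            var qr = a.QR();             // A = Q*R via Householder reflections
            var xQr = qr.Solve(b);
            var residual = (qr.Q * qr.R - a).FrobeniusNorm();

            System.Console.WriteLine(xChol);
            System.Console.WriteLine(xLu);
            System.Console.WriteLine(xQr);
            System.Console.WriteLine(det + " " + residual);
        }
    }

Cholesky costs roughly half the floating point work of LU on the same matrix, which is why its constructor insists on a symmetric positive definite input and throws otherwise.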
+ + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. 
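For the singular value decomposition described above (M = U*Σ*Vᵀ with the singular values ordered descending), the factorization object exposes the singular values, the numerical rank, the condition number and a least-squares style solver. A sketch assuming the Math.NET Numerics Svd() extension and its S/Rank/ConditionNumber/Solve members:

    using MathNet.Numerics.LinearAlgebra;

    static class SvdSketch
    {
        static void Run()
        {
            // Overdetermined 3x2 system, solved in the least-squares sense via the SVD.
            var m = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 1.0, 0.0 },
                { 0.0, 2.0 },
                { 3.0, 0.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

            var svd = m.Svd(true);                    // true: also compute the U and VT vectors
            var sigma = svd.S;                        // singular values, descending
            int rank = svd.Rank;                      // number of non-negligible singular values
            double condition = svd.ConditionNumber;   // max(S) / min(S)
            var x = svd.Solve(b);                     // least-squares solution of M x = b

            System.Console.WriteLine(sigma);
            System.Console.WriteLine(rank + " " + condition);
            System.Console.WriteLine(x);
        }
    }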
+ + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. 
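The eigenvalue decomposition summarized in the remarks above satisfies A*V = V*D, with complex eigenvalue pairs stored as 2-by-2 blocks of D when A is not symmetric. A small sketch of checking that identity through the Math.NET Numerics Evd() extension; EigenValues, EigenVectors and D are the member names I would expect from that library and are assumptions here.

    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    static class EvdSketch
    {
        static void Run()
        {
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 2.0, 1.0 },
                { 1.0, 2.0 }
            });

            var evd = a.Evd();
            Vector<Complex> eigenValues = evd.EigenValues; // complex in general, real for symmetric A
            Matrix<double> v = evd.EigenVectors;
            Matrix<double> d = evd.D;                      // (block) diagonal eigenvalue matrix

            // A*V and V*D should agree up to rounding error.
            System.Console.WriteLine((a * v - v * d).FrobeniusNorm());
            System.Console.WriteLine(eigenValues);
        }
    }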
The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. 
+ + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex value z1 + Complex value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. 
+ + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
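The complex matrix members in this stretch (conjugate transpose, pointwise multiply/divide/power, modulus and remainder) are documented here in their result-parameter form; the public API also has allocating counterparts that return a fresh matrix. A brief sketch with Matrix&lt;Complex&gt;, assuming the usual Math.NET Numerics generic builder:

    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    static class ComplexPointwiseSketch
    {
        static void Run()
        {
            var a = Matrix<Complex>.Build.DenseOfArray(new[,]
            {
                { new Complex(1, 1), new Complex(0, 2) },
                { new Complex(3, 0), new Complex(1, -1) }
            });
            var b = Matrix<Complex>.Build.Dense(2, 2, (i, j) => new Complex(i + 1, j));

            var ah = a.ConjugateTranspose();   // Hermitian (conjugate) transpose
            var prod = a.PointwiseMultiply(b); // element-by-element product
            var quot = a.PointwiseDivide(b);   // element-by-element quotient

            System.Console.WriteLine(ah);
            System.Console.WriteLine(prod);
            System.Console.WriteLine(quot);
        }
    }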
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
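The example this remark refers to did not survive in this text, so here is a hedged replacement showing the usual calling pattern for the iterative solvers: build an Iterator from stop criteria, choose a preconditioner, and call Solve(matrix, input, result, iterator, preconditioner) exactly as the parameter list below describes. The class names (BiCgStab, Iterator, ResidualStopCriterion, IterationCountStopCriterion, DiagonalPreconditioner) are recalled from the Math.NET Numerics solver namespaces and should be treated as assumptions, not as part of this diff.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class BiCgStabSketch
    {
        static void Run()
        {
            // Small non-symmetric system A x = b.
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 1.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3);

            // Stop once the residual is small enough or an iteration budget is spent.
            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            var solver = new BiCgStab();
            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

            System.Console.WriteLine(x);
        }
    }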
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris

+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
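The GPBiCG solver is driven the same way as the BiCgStab sketch earlier in this section; only the solver class changes, and the switching thresholds between the BiCgStab and GPBiCG phases are exposed as properties (documented just below). A minimal sketch, with the same caveat that the concrete class names are assumptions based on the Math.NET Numerics solver namespaces:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class GpBiCgSketch
    {
        // A and b can be built exactly as in the BiCgStab sketch above.
        static Vector<double> Solve(Matrix<double> a, Vector<double> b)
        {
            var x = Vector<double>.Build.Dense(b.Count);
            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            // The diagonal (Jacobi) preconditioner uses 1/diagonal as its
            // preconditioning values; cheap to build and a reasonable first choice.
            var solver = new GpBiCg();
            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
            return x;
        }
    }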
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
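An ILU(0) preconditioner like the one described above is normally paired with one of the Krylov solvers from this file; the solver initializes the preconditioner with the system matrix before iterating. A sketch under the assumption that the class is exposed as ILU0Preconditioner in the Math.NET Numerics double solver namespace:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class Ilu0Sketch
    {
        // Most useful when a is sparse; ILU(0) keeps the sparsity pattern of a.
        static Vector<double> Solve(Matrix<double> a, Vector<double> b)
        {
            var x = Vector<double>.Build.Dense(b.Count);
            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            var solver = new BiCgStab();
            solver.Solve(a, b, x, iterator, new ILU0Preconditioner());
            return x;
        }
    }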
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
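The drop-tolerance ILU preconditioner described above takes its three tuning knobs (fill level, drop tolerance, pivot tolerance) either through the constructor or through the properties documented below; changing them afterwards requires re-initializing the preconditioner. A sketch, assuming the class is exposed as ILUTPPreconditioner and that the constructor takes the settings in the order they are described here (both assumptions on my part):

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class IlutpSketch
    {
        static Vector<double> Solve(Matrix<double> a, Vector<double> b)
        {
            var x = Vector<double>.Build.Dense(b.Count);
            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            // Fill level 200 (the stated default), drop tolerance 1e-4, no pivoting.
            var preconditioner = new ILUTPPreconditioner(200.0, 1e-4, 0.0);

            var solver = new BiCgStab();
            solver.Solve(a, b, x, iterator, preconditioner);
            return x;
        }
    }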
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
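Again the referenced example did not survive in this text. ML(k)-BiCGStab follows the same Solve(matrix, input, result, iterator, preconditioner) pattern as the other solvers in this file, with the extra twist that the number of Lanczos starting vectors is configurable through the property documented below. A hedged sketch, assuming the class name MlkBiCgStab from the Math.NET Numerics solver namespace:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class MlkBiCgStabSketch
    {
        static Vector<double> Solve(Matrix<double> a, Vector<double> b)
        {
            var x = Vector<double>.Build.Dense(b.Count);
            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            var solver = new MlkBiCgStab();   // default number of starting vectors
            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
            return x;
        }
    }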
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
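The missing example for the TFQMR solver would look essentially like the other solver sketches in this section; only the solver class changes. Assuming the class is exposed as TFQMR:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class TfqmrSketch
    {
        // Same calling pattern as the BiCgStab/GPBiCG sketches above.
        static void Solve(Matrix<double> a, Vector<double> b, Vector<double> x)
        {
            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));
            new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        }
    }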
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
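Given the CSR-backed storage described above, sparse matrices are normally populated from an indexed enumerable of non-zero entries, or built empty and filled selectively, rather than copied from dense arrays. A sketch assuming the builder's SparseOfIndexed and Sparse methods from the Math.NET Numerics API:

    using System;
    using System.Collections.Generic;
    using MathNet.Numerics.LinearAlgebra;

    static class SparseCreationSketch
    {
        static void Run()
        {
            // Explicit non-zeros of a 1000x1000 matrix; every other cell stays zero.
            var entries = new List<Tuple<int, int, double>>
            {
                Tuple.Create(0, 0, 4.0),
                Tuple.Create(1, 1, 5.0),
                Tuple.Create(0, 999, 1.0),
                Tuple.Create(999, 999, 3.0)
            };
            var a = Matrix<double>.Build.SparseOfIndexed(1000, 1000, entries);

            // Empty sparse matrix, filled one cell at a time.
            var b = Matrix<double>.Build.Sparse(1000, 1000);
            b[10, 20] = 2.5;

            System.Console.WriteLine(a.RowCount + "x" + a.ColumnCount);
            System.Console.WriteLine(b[10, 20]);
        }
    }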
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. 
+ + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. 
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. 
+ + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex32 value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex32 value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex32 value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. 
+ + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex32 dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex32 dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. 
+ Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
+ + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. 
+ + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
+ + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. 
+ If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
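+ A minimal sketch of that usage, assuming the double-precision BiCgStab, DiagonalPreconditioner,
+ Iterator and stop-criterion types shipped in the referenced MathNet.Numerics 3.16.0 package; the
+ Complex32 solver documented here exposes the same Solve(matrix, input, result, iterator, preconditioner)
+ signature, so the pattern carries over.
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ // Small non-symmetric system A x = b.
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 4.0, 1.0, 0.0 },
+     { 2.0, 5.0, 1.0 },
+     { 0.0, 1.0, 3.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
+ var x = Vector<double>.Build.Dense(A.ColumnCount);   // receives the solution
+
+ // Stop after 1000 iterations or once the residual drops below 1e-10.
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ // BiCGStab preconditioned with the diagonal (Jacobi) preconditioner described later in this file.
+ var solver = new BiCgStab();
+ solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());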
+
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
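+ As a hedged illustration of that remark (a sketch of the composite idea rather than this class's own
+ sample), the shared iterator can be pictured as one iteration/residual budget spanning a sequence of
+ sub-solvers; the Iterator<double>.Status check below is an assumption.
+
+ // A, b, x and iterator as in the BiCgStab sketch earlier in this file.
+ var subSolvers = new IIterativeSolver<double>[] { new TFQMR(), new BiCgStab() };
+ foreach (var subSolver in subSolvers)
+ {
+     // Each sub-solver consumes the same iterator, so iteration counts and
+     // residual checks accumulate across the whole sequence.
+     subSolver.Solve(A, b, x, iterator, new DiagonalPreconditioner());
+     if (iterator.Status == IterationStatus.Converged)
+     {
+         break;   // stop at the first sub-solver that reaches the residual target
+     }
+ }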
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
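+ A minimal sketch on the same assumptions as the BiCgStab example earlier in this file (the
+ double-precision solver type is assumed to be named GpBiCg; the Solve signature is identical).
+
+ // A, b, x and iterator as in the BiCgStab sketch.
+ var solver = new GpBiCg();   // alternates between BiCGStab-style and GPBiCG-style steps internally
+ solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());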
+
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
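+ A hedged sketch of feeding this preconditioner to one of the iterative solvers above; the class name
+ ILU0Preconditioner is assumed, while Initialize and the preconditioner parameter of Solve are as
+ documented here.
+
+ // A, b, x and iterator as in the BiCgStab sketch earlier in this file.
+ var ilu0 = new ILU0Preconditioner();
+ ilu0.Initialize(A);   // builds the combined L/U storage from A (ILU(0) keeps A's sparsity pattern)
+ new BiCgStab().Solve(A, b, x, iterator, ilu0);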
+
+
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
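+ A hedged sketch of the three-parameter constructor described below (fill level, drop tolerance and
+ pivot tolerance, in that order); the class name ILUTPPreconditioner and the numeric values are
+ assumptions for illustration only.
+
+ // A, b, x and iterator as in the BiCgStab sketch earlier in this file.
+ var ilutp = new ILUTPPreconditioner(10.0, 1e-4, 0.5);   // fill level, drop tolerance, pivot tolerance
+ ilutp.Initialize(A);   // changing the settings afterwards requires re-initialization, as noted below
+ new BiCgStab().Solve(A, b, x, iterator, ilutp);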
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
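+ A minimal sketch on the same assumptions as the BiCgStab example earlier in this file (the
+ double-precision solver type is assumed to be named MlkBiCgStab).
+
+ // A, b, x and iterator as in the BiCgStab sketch. The number of Lanczos starting vectors
+ // can be tuned through the starting-vector property documented below before calling Solve.
+ var solver = new MlkBiCgStab();
+ solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());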
+
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
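+ A minimal sketch on the same assumptions as the BiCgStab example earlier in this file (the
+ double-precision solver type is assumed to be named TFQMR).
+
+ // A, b, x and iterator as in the BiCgStab sketch.
+ var solver = new TFQMR();
+ solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());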
+
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. 
[The vector documentation closes with the index of the minimum element and Normalize, which scales the vector to a unit vector with respect to the p-norm. It is followed by the generic linear-algebra type builders, for situations where a matrix or vector must be created generically; normal user code should not need them. The matrix builder exposes the element type's 0.0 and 1.0 values and factory methods for dense matrices: from an initialized storage instance, with given dimensions (zero-filled; zero-length matrices are not supported), bound directly to a raw column-major array without copying, filled with a constant or an init function, diagonal and identity variants, values sampled from a random distribution (including positive-definite matrices built from products of two samples), and independent copies of other matrices, two-dimensional arrays, indexed enumerables (omitted keys treated as zero), column-major or row-major enumerables, enumerables of column or row enumerables, column/row arrays and vectors, diagonal vectors and arrays, and 2-D arrays of sub-matrices placed block-wise (misaligned blocks sit at the top-left of their cell, remaining fields zero). Sparse matrices support the same set of sources, and diagonal matrices can be created from storage, by dimension, bound to a raw diagonal array, from a constant or an init function, as identity matrices, or as copies of diagonal vectors and arrays. The vector builder mirrors this for dense and sparse vectors: by size, bound directly to an array, filled with a constant or an init function, sampled from a random distribution, or copied from another vector, an array, an enumerable, or an indexed enumerable.]
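A minimal sketch of the generic builder API summarised above, assuming the Math.NET Numerics 3.x entry points Matrix<T>.Build and Vector<T>.Build; the example itself is illustrative and not part of the diff:

// Illustrative sketch, not part of the diff: the dense matrix/vector builders.
using System;
using MathNet.Numerics.LinearAlgebra;

class BuilderSketch
{
    static void Main()
    {
        var M = Matrix<double>.Build;   // generic matrix builder
        var V = Vector<double>.Build;   // generic vector builder

        var zero = M.Dense(3, 3);                                        // 3x3, all cells zero
        var grid = M.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } }); // independent copy of a 2-D array
        var init = M.Dense(2, 3, (i, j) => 10 * i + j);                  // init function per cell
        var eye  = M.DenseIdentity(4);                                   // identity matrix

        var x = V.Dense(5, i => i * i);                                  // [0, 1, 4, 9, 16]
        var y = V.DenseOfEnumerable(new[] { 1.0, 2.0, 3.0 });            // copy of an enumerable

        Console.WriteLine(grid * grid);  // builders return ordinary Matrix<T>/Vector<T>
        Console.WriteLine(init.RowCount + "x" + init.ColumnCount);
        Console.WriteLine(eye.Trace());  // 4
        Console.WriteLine(x + x);
        Console.WriteLine(y.Sum());      // 6
    }
}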
[The matrix and vector builder documentation is then repeated, essentially verbatim, for a second element type (the generic type parameters were part of the stripped XML markup). The span ends with the Cholesky factorization class: for a symmetric, positive-definite matrix A the factorization is a lower-triangular matrix L with A = L*L'; it is computed at construction time, and the constructor throws if A is not symmetric positive definite. The class exposes the lower-triangular factor, the determinant and log-determinant of A, and Solve overloads for AX = B (matrix right-hand side) and Ax = b (vector right-hand side). It is followed by the eigenvalue decomposition of a real matrix: if A is symmetric, A = V*D*V' with D diagonal and V orthogonal (V*V^T = I); otherwise D is block diagonal, with real eigenvalues in 1-by-1 blocks and complex pairs lambda ± i*mu in 2-by-2 blocks [lambda, mu; -mu, lambda], and the columns of V are eigenvectors in the sense that A*V = V*D. V may be badly conditioned or even singular, so the validity of A = V*D*Inverse(V) depends on the condition of V. Supported element types are double, single, Complex and Complex32.]
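The sparse and diagonal builders work the same way; another illustrative sketch (not part of the diff, and the index/value triples are invented):

// Illustrative sketch, not part of the diff: sparse and diagonal construction.
using System;
using System.Collections.Generic;
using MathNet.Numerics.LinearAlgebra;

class SparseDiagonalSketch
{
    static void Main()
    {
        var M = Matrix<double>.Build;

        // Sparse matrix from (row, column, value) entries; omitted positions are zero.
        var entries = new List<Tuple<int, int, double>>
        {
            Tuple.Create(0, 0, 2.0),
            Tuple.Create(1, 2, -1.0),
            Tuple.Create(3, 3, 5.0)
        };
        var s = M.SparseOfIndexed(4, 4, entries);

        // Diagonal matrices: identity, constant diagonal, or a copy of an existing diagonal array.
        var eye = M.DiagonalIdentity(4);
        var k   = M.DenseDiagonal(4, 4, 2.5);
        var d   = M.DiagonalOfDiagonalArray(new[] { 1.0, 2.0, 3.0, 4.0 });

        Console.WriteLine(s);
        Console.WriteLine((s + k).Trace()); // 17: storage types mix freely in arithmetic
        Console.WriteLine(eye * d);         // product of two diagonal matrices
    }
}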
[The Evd class exposes whether the matrix is symmetric, the absolute value of its determinant, the effective numerical rank (the number of non-negligible eigenvalues), whether it is full rank, the eigenvalues (λ) in ascending order, the eigenvectors, the block-diagonal eigenvalue matrix D, and Solve overloads for AX = B and Ax = b. Next come the QR decomposition by modified Gram-Schmidt orthogonalization (any real square matrix A = QR, with Q an orthogonal m-by-n matrix and R an n-by-n upper-triangular matrix, computed at construction), the generic solver interface with Solve overloads for matrix and vector right-hand sides, and the LU factorization: A = L*U with lower-triangular L and upper-triangular U, plus a set of pivot elements stored for numerical stability that encode a permutation matrix P such that P*A = L*U. The LU class exposes the triangular factors, the permutation, the determinant, Solve overloads, and an Inverse method computed via the LU decomposition.]
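A hedged example of the Cholesky and LU interfaces described here (the 3x3 system is made up; method names follow Math.NET Numerics 3.x and the example is not part of the diff):

// Illustrative sketch, not part of the diff: Cholesky and LU solves.
using System;
using MathNet.Numerics.LinearAlgebra;

class FactorizationSketch
{
    static void Main()
    {
        // Symmetric positive-definite matrix, so the Cholesky factorization exists.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4, 1, 0 },
            { 1, 3, 1 },
            { 0, 1, 2 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        var chol = a.Cholesky();
        Console.WriteLine(chol.Factor);       // lower-triangular L with A = L*L'
        Console.WriteLine(chol.Determinant);
        Console.WriteLine(chol.Solve(b));     // solves A x = b

        var lu = a.LU();
        Console.WriteLine(lu.L);
        Console.WriteLine(lu.U);
        Console.WriteLine(lu.Determinant);
        Console.WriteLine(lu.Solve(b));       // same solution via P*A = L*U
        Console.WriteLine(lu.Inverse());      // inverse computed from the LU factors
    }
}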
[QRMethod selects whether a full or a thin QR factorization is performed. The QR class (computed by Householder transformations) decomposes a real m-by-n matrix A as A = QR, where Q is orthogonal (Q^T Q = I) and R is upper (right) triangular; a full factorization yields an m-by-m Q and an m-by-n R, a thin factorization an m-by-n Q and an n-by-n R. It exposes Q, R, the absolute value of the determinant, whether the matrix is full rank, and Solve overloads. The singular value decomposition class factors an m-by-n real matrix M as M = UΣV^T, with U an m-by-m unitary matrix, Σ an m-by-n diagonal matrix of nonnegative singular values (ordering the diagonal entries in descending order is the usual convention, and Σ is then uniquely determined by M even though U and V are not), and V an n-by-n unitary matrix; the factorization is computed at construction. Members include a flag indicating whether U and V^T were computed, the singular values, U, V^T, the singular values as a diagonal matrix, the effective numerical rank, the 2-norm, the condition number max(S)/min(S), the determinant of a square input, and Solve overloads for AX = B.]
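And a sketch of the QR, singular value and eigenvalue decompositions (illustrative only, not part of the diff; the rectangular matrix is chosen just to show the thin shapes and a least-squares solve):

// Illustrative sketch, not part of the diff: QR, SVD and EVD.
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Factorization;

class DecompositionSketch
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2, -1 },
            { 1,  3 },
            { 0,  1 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 0.0, 2.0 });

        var qr = a.QR(QRMethod.Thin);   // thin QR: Q is 3x2, R is 2x2
        Console.WriteLine(qr.Q);
        Console.WriteLine(qr.R);
        Console.WriteLine(qr.Solve(b)); // least-squares solution of A x ≈ b

        var svd = a.Svd();              // A = U * Σ * V^T
        Console.WriteLine(svd.S);       // singular values
        Console.WriteLine(svd.Rank);
        Console.WriteLine(svd.ConditionNumber);

        var sym = a.TransposeThisAndMultiply(a); // A^T * A is symmetric
        var evd = sym.Evd();
        Console.WriteLine(evd.EigenValues);      // eigenvalues (complex vector)
        Console.WriteLine(evd.D);                // block-diagonal eigenvalue matrix
    }
}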
[The remaining Svd Solve overloads handle Ax = b with a vector right-hand side. The documentation then turns to the Matrix base class (supported element types double, single, Complex and Complex32): the One and Zero values, and the arithmetic routines that write into a caller-supplied result matrix — negation, complex conjugation, addition and subtraction of a scalar or of another matrix (with dimension checks), multiplication by a scalar, by a vector, by another matrix, and by the transpose or conjugate transpose of this or the other matrix, division of the matrix by a scalar and of a scalar by the matrix, canonical modulus (sign of the divisor) and remainder (% semantics, sign of the dividend) with the scalar in either role, and the pointwise operations: multiply, divide, power with a scalar or a matrix exponent, modulus, remainder, exponential and natural logarithm. These are followed by the public, bounds-checked Add and Subtract methods, each available as a variant returning a new matrix and as a variant storing into a result matrix.]
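The element-wise routines surface as public pointwise methods; a brief illustration, again not part of the diff and with invented values:

// Illustrative sketch, not part of the diff: element-wise matrix operations.
using System;
using MathNet.Numerics.LinearAlgebra;

class PointwiseSketch
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, -2 }, { 3, 4 } });
        var b = Matrix<double>.Build.DenseOfArray(new double[,] { { 5,  6 }, { 7, 8 } });

        Console.WriteLine(a.Add(10.0));             // scalar added to every element
        Console.WriteLine(a - b);                   // element-wise subtraction
        Console.WriteLine(a.PointwiseMultiply(b));  // Hadamard product
        Console.WriteLine(a.PointwiseDivide(b));
        Console.WriteLine(a.PointwisePower(2.0));   // raise every element to a power
        Console.WriteLine(b.PointwiseExp());        // exp applied element-wise
        Console.WriteLine(b.Modulus(3.0));          // canonical modulus, sign of the divisor
        Console.WriteLine(a.Remainder(3.0));        // % semantics, sign of the dividend
    }
}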
[The public arithmetic continues: subtracting a scalar from the matrix (and the matrix from a scalar), subtracting another matrix, multiplying and dividing by a scalar, dividing a scalar by the matrix, multiplying by a vector on the right (A*x) or on the left (x*A), multiplying by another matrix, multiplying by the transpose or conjugate transpose of another matrix, multiplying the transpose or conjugate transpose of this matrix by a vector or by another matrix, raising a square matrix to a positive integer power, and negation — each documented both as a method returning a new matrix and as an overload writing into a result matrix, with the exceptions thrown on dimension mismatches.]
+ + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. 
+ + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
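The pointwise operations documented above come in two forms: allocating overloads that return a new matrix, and overloads that write into a caller-supplied result matrix of the same size. A minimal sketch against the MathNet.Numerics 3.x Matrix<T> API that these comments describe (variable names are illustrative only):

    using MathNet.Numerics.LinearAlgebra;

    var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
    var b = Matrix<double>.Build.DenseOfArray(new double[,] { { 5, 6 }, { 7, 8 } });

    // Allocating form: returns a new matrix.
    var hadamard = a.PointwiseMultiply(b);   // element-wise product
    var squared  = a.PointwisePower(2.0);    // each element raised to the power 2

    // Result form: reuses an existing matrix of matching dimensions.
    var result = Matrix<double>.Build.Dense(2, 2);
    a.PointwiseDivide(b, result);            // result[i,j] = a[i,j] / b[i,j]
    a.PointwiseExp(result);                  // result[i,j] = exp(a[i,j])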
+ + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Calculates the rank of the matrix. + + effective numerical rank, obtained from SVD + + + + Calculates the nullity of the matrix. + + effective numerical nullity, obtained from SVD + + + Calculates the condition number of this matrix. + The condition number of the matrix. + The condition number is calculated using singular value decomposition. + + + Computes the determinant of this matrix. + The determinant of this matrix. + + + + Computes an orthonormal basis for the null space of this matrix, + also known as the kernel of the corresponding matrix transformation. 
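Trace, rank, determinant, condition number and the kernel/range bases described above are parameterless instance calls; a short sketch, assuming the MathNet.Numerics 3.x API (the numbers in the comments follow from the chosen values):

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 4, 2 },
        { 1, 3 }
    });

    double trace = m.Trace();            // 4 + 3 = 7 (square matrices only)
    double det   = m.Determinant();      // 4*3 - 2*1 = 10
    int    rank  = m.Rank();             // effective numerical rank via SVD (2 here)
    double cond  = m.ConditionNumber();  // computed from the singular values
    var    nullBasis = m.Kernel();       // orthonormal basis of the null space (empty: m is invertible)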
+ + + + + Computes an orthonormal basis for the column space of this matrix, + also known as the range or image of the corresponding matrix transformation. + + + + Computes the inverse of this matrix. + The inverse of this matrix. + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + If the result matrix's dimensions are not (this.Rows * lower.rows) x (this.Columns * lower.Columns). + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + Calculates the induced L1 norm of this matrix. 
+ The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + For sparse matrices, the L2 norm is computed using a dense implementation of singular value decomposition. + In a later release, it will be replaced with a sparse implementation. + + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Creates a new object that is a copy of the current instance. + + + A new object that is a copy of this instance. + + + + + Returns a string that describes the type, dimensions and shape of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes this matrix. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Matrix class. + + + + + Gets the raw matrix data storage. + + + + + Gets the number of columns. + + The number of columns. + + + + Gets the number of rows. + + The number of rows. + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. 
+ + + + + Sets the value of the given element without range checking. + + + The row of the element. + + + The column of the element. + + + The value to set the element to. + + + + + Sets all values to zero. + + + + + Sets all values of a row to zero. + + + + + Sets all values of a column to zero. + + + + + Sets all values for all of the chosen rows to zero. + + + + + Sets all values for all of the chosen columns to zero. + + + + + Sets all values of a sub-matrix to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Creates a clone of this instance. + + + A clone of the instance. + + + + + Copies the elements of this matrix to the given matrix. + + + The matrix to copy values into. + + + If target is . + + + If this and the target matrix do not have the same dimensions.. + + + + + Copies a row into an Vector. + + The row to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of rows. + + + + Copies a row into to the given Vector. + + The row to copy. + The Vector to copy the row into. + If the result vector is . + If is negative, + or greater than or equal to the number of rows. + If this.Columns != result.Count. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of rows. + is negative, + or greater than or equal to the number of columns. + (columnIndex + length) >= Columns. + If is not positive. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Copies a column into a new Vector>. + + The column to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of columns. + + + + Copies a column into to the given Vector. + + The column to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If this.Rows != result.Count. + + + + Copies the requested column elements into a new Vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of columns. + is negative, + or greater than or equal to the number of rows. + (rowIndex + length) >= Rows. + + If is not positive. + + + + Copies the requested column elements into the given vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. 
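In practice the indexer, At, the Row/Column copies and the Clear* helpers documented above are used like this; a brief sketch (method names as they appear in MathNet.Numerics 3.x, values arbitrary):

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.Dense(3, 3, (i, j) => 10 * i + j);

    double a = m[1, 2];           // range-checked get (12.0)
    m[1, 2] = 42.0;               // range-checked set
    double b = m.At(0, 1);        // get without bounds validation
    m.At(0, 1, 7.0);              // set without bounds validation

    var row = m.Row(1);           // copy of row 1 as a Vector<double>
    var sub = m.Column(2, 0, 2);  // first two elements of column 2
    m.ClearRow(0);                // sets every value in row 0 to zero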
+ + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Returns the elements of the diagonal in a Vector. + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a new matrix and inserts the given column at the given index. + + The index of where to insert the column. + The column to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of columns. + If the size of != the number of rows. + + + + Creates a new matrix with the given column removed. + + The index of the column to remove. + A new matrix without the chosen column. + If is < zero or >= the number of columns. + + + + Copies the values of the given Vector to the specified column. + + The column to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given Vector to the specified sub-column. + + The column to copy the values to. + The row to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given array to the specified column. + + The column to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. 
+ If the size of does not + equal the number of rows of this Matrix. + If the size of does not + equal the number of rows of this Matrix. + + + + Creates a new matrix and inserts the given row at the given index. + + The index of where to insert the row. + The row to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of rows. + If the size of != the number of columns. + + + + Creates a new matrix with the given row removed. + + The index of the row to remove. + A new matrix without the chosen row. + If is < zero or >= the number of rows. + + + + Copies the values of the given Vector to the specified row. + + The row to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given Vector to the specified sub-row. + + The row to copy the values to. + The column to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given array to the specified row. + + The row to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The column to start copying to. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The number of rows to copy. Must be positive. + The column to start copying to. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The row of the sub-matrix to start copying from. + The number of rows to copy. Must be positive. + The column to start copying to. + The column of the sub-matrix to start copying from. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of the given Vector to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. 
The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Returns the transpose of this matrix. + + The transpose of this matrix. + + + + Puts the transpose of this matrix into the result matrix. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + + + + Concatenates this matrix with the given matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Concatenates this matrix with the given matrix and places the result into the result matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Diagonally stacks his matrix on top of the given matrix. The new matrix is a M-by-N matrix, + where M = this.Rows + lower.Rows and N = this.Columns + lower.Columns. + The values of off the off diagonal matrices/blocks are set to zero. + + The lower, right matrix. + If lower is . + the combined matrix + + + + + + Diagonally stacks his matrix on top of the given matrix and places the combined matrix into the result matrix. + + The lower, right matrix. + The combined matrix + If lower is . + If the result matrix is . + If the result matrix's dimensions are not (this.Rows + lower.rows) x (this.Columns + lower.Columns). + + + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Evaluates whether this matrix is conjugate symmetric. + + + + + Returns this matrix as a multidimensional array. + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + + A multidimensional containing the values of this matrix. + + + + Returns the matrix's elements as an array with the data laid out column by column (column major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the matrix's elements as an array with the data laid out row by row (row major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
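For the 3x3 example shown above, both export orders can be reproduced directly; a brief sketch:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1, 2, 3 },
        { 4, 5, 6 },
        { 7, 8, 9 }
    });

    double[] colMajor = m.ToColumnMajorArray(); // 1, 4, 7, 2, 5, 8, 3, 6, 9
    double[] rowMajor = m.ToRowMajorArray();    // 1, 2, 3, 4, 5, 6, 7, 8, 9
    double[,] grid    = m.ToArray();            // independent two-dimensional copy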
+ + + Returns this matrix as an array of row arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns this matrix as an array of column arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns the internal multidimensional array of this matrix if, and only if, this matrix is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the matrix will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Returns the internal column by column (column major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row by row (row major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
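Unlike the To* exports, the As* accessors above hand back the matrix's internal storage when the layout matches and null otherwise, so callers must handle both cases; a hedged sketch (dense matrices in MathNet.Numerics store their data column by column):

    using MathNet.Numerics.LinearAlgebra;

    var dense = Matrix<double>.Build.Dense(3, 3, (i, j) => i + j);

    // Dense storage is column major, so this returns the live internal buffer.
    double[] shared = dense.AsColumnMajorArray();
    if (shared != null)
    {
        shared[0] = 99.0;            // also changes dense[0, 0]
    }

    // ToColumnMajorArray always copies, regardless of the storage scheme.
    double[] copy = dense.ToColumnMajorArray();
    copy[1] = -1.0;                  // leaves the matrix untouched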
+ + + Returns the internal row arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowArrays instead if you always need an independent array. + + + + + Returns the internal column arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnArrays instead if you always need an independent array. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix. + + The column to start enumerating over. + The number of columns to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix and their index. + + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix and their index. + + The column to start enumerating over. + The number of columns to enumerating over. + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix. + + The row to start enumerating over. + The number of rows to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix and their index. 
+ + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix and their index. + + The row to start enumerating over. + The number of rows to enumerating over. + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Applies a function to each value of this matrix and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value with its result. + The row and column indices of each value (zero-based) are passed as first arguments to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + For each row, applies a function f to each element of the row, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each row. + + + + + For each column, applies a function f to each element of the column, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each column. + + + + + Applies a function f to each row vector, threading an accumulator vector argument through the computation. 
+ Returns the resulting accumulator vector. + + + + + Applies a function f to each column vector, threading an accumulator vector argument through the computation. + Returns the resulting accumulator vector. + + + + + Reduces all row vectors by applying a function between two of them, until only a single vector is left. + + + + + Reduces all column vectors by applying a function between two of them, until only a single vector is left. + + + + + Applies a function to each value pair of two matrices and replaces the value in the result vector. + + + + + Applies a function to each value pair of two matrices and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two matrices and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two matrices of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element pairs of two matrices of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two matrices of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to add. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to add. + The right matrix to add. + The result of the addition. + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Subtracts a scalar from each element of a matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. 
+ The left matrix to subtract. + The scalar value to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Substracts each element of a matrix from a scalar. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Divides a scalar with a matrix. + + The scalar to divide. + The matrix. + The result of the division. + If is . + + + + Divides a matrix with a scalar. + + The matrix to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of the matrix of the given divisor. + + The matrix whose elements we want to compute the modulus of. + The divisor to use. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the matrix. + + The dividend we want to compute the modulus of. + The matrix whose elements we want to use as divisor. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two matrices. + + The matrix whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
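The operator overloads documented above allocate a new matrix (or vector) on every call; typical usage, with shapes chosen so the listed dimension checks pass:

    using MathNet.Numerics.LinearAlgebra;

    var a = Matrix<double>.Build.Dense(2, 2, 1.0);   // all ones
    var b = Matrix<double>.Build.Dense(2, 2, 2.0);   // all twos
    var v = Vector<double>.Build.Dense(2, 1.0);

    var sum     = a + b;     // matrix + matrix
    var shifted = a + 3.0;   // scalar added to every element
    var product = a * b;     // matrix product
    var image   = a * v;     // matrix * vector
    var scaled  = 0.5 * b;   // scalar * matrix
    var negated = -a;        // pointwise negation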
+ + + + Computes the sqrt of a matrix pointwise + + The input matrix + + + + + Computes the exponential of a matrix pointwise + + The input matrix + + + + + Computes the log of a matrix pointwise + + The input matrix + + + + + Computes the log10 of a matrix pointwise + + The input matrix + + + + + Computes the sin of a matrix pointwise + + The input matrix + + + + + Computes the cos of a matrix pointwise + + The input matrix + + + + + Computes the tan of a matrix pointwise + + The input matrix + + + + + Computes the asin of a matrix pointwise + + The input matrix + + + + + Computes the acos of a matrix pointwise + + The input matrix + + + + + Computes the atan of a matrix pointwise + + The input matrix + + + + + Computes the sinh of a matrix pointwise + + The input matrix + + + + + Computes the cosh of a matrix pointwise + + The input matrix + + + + + Computes the tanh of a matrix pointwise + + The input matrix + + + + + Computes the absolute value of a matrix pointwise + + The input matrix + + + + + Computes the floor of a matrix pointwise + + The input matrix + + + + + Computes the ceiling of a matrix pointwise + + The input matrix + + + + + Computes the rounded value of a matrix pointwise + + The input matrix + + + + + Computes the Cholesky decomposition for a matrix. + + The Cholesky decomposition object. + + + + Computes the LU decomposition for a matrix. + + The LU decomposition object. + + + + Computes the QR decomposition for a matrix. + + The type of QR factorization to perform. + The QR decomposition object. + + + + Computes the QR decomposition for a matrix using Modified Gram-Schmidt Orthogonalization. + + The QR decomposition object. + + + + Computes the SVD decomposition for a matrix. + + Compute the singular U and VT vectors or not. + The SVD decomposition object. + + + + Computes the EVD decomposition for a matrix. + + The EVD decomposition object. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. 
+ + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The result matrix X. + + + + Converts a matrix to single precision. + + + + + Converts a matrix to double precision. + + + + + Converts a matrix to single precision complex numbers. + + + + + Converts a matrix to double precision complex numbers. + + + + + Gets a single precision complex matrix with the real parts from the given matrix. + + + + + Gets a double precision complex matrix with the real parts from the given matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. 
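The factorizations and Solve overloads above are typically combined as follows; a minimal sketch assuming a square, well-conditioned system (values illustrative):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 3, 2 },
        { 1, 4 }
    });
    var b = Vector<double>.Build.Dense(new[] { 7.0, 9.0 });

    // Direct solve: picks a suitable dense factorization internally.
    var x = A.Solve(b);

    // Or factorize once and reuse the decomposition for several right-hand sides.
    var lu = A.LU();
    var x1 = lu.Solve(b);
    var x2 = lu.Solve(2.0 * b);

    // Other factorizations documented above.
    var qr  = A.QR();
    var svd = A.Svd();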
[Next come the enumerations that control storage operations: whether existing data must be cleared or may be assumed to be zero already, whether zero entries may be skipped when enumerating sparse matrices or must be visited, and whether a matrix is known to be symmetric, Hermitian, not symmetric, or of unknown symmetry. The iterative-solver infrastructure follows: a stop criterion driven by a cancellation token; one that delegates the status decision to a user-supplied delegate; a divergence monitor configured by a maximum relative residual increase and a minimum number of tracked iterations; a criterion that flags NaN residuals; the IIterationStopCriterion base interface (DetermineStatus, Status, Reset); the interface for iterative solvers, whose Solve takes the coefficient matrix A, the vector b, the result vector x, an iterator and a preconditioner; the solver-setup interface exposing the solver and preconditioner types, factory methods, and relative speed and reliability values between 0 and 1; the preconditioner interface (Initialize with the coefficient matrix, Approximate for Mx = b); an iteration-count stop criterion with a configurable maximum; the Iterator that aggregates stop criteria and supports DetermineStatus, Cancel, Reset and cloning; a residual stop criterion (maximum residual plus a minimum number of iterations below it); and helpers that load solver-setup objects from an assembly.]
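The stop-criterion, iterator and solver entries summarized above fit together in a fairly mechanical way. A minimal sketch of that wiring, assuming the Math.NET Numerics 3.x types these docs describe (BiCgStab is one concrete solver from that library, and the matrix and vector values are invented for illustration):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;   // BiCgStab
using MathNet.Numerics.LinearAlgebra.Solvers;          // Iterator, stop criteria, UnitPreconditioner

static class IterativeSolveSketch
{
    static void Main()
    {
        // Arbitrary 3x3 system A*x = b, used only for illustration.
        var A = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 4.0, 1.0, 0.0 },
            { 1.0, 3.0, 1.0 },
            { 0.0, 1.0, 2.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        // Stop when the residual is small enough or after too many iterations.
        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(1000));

        // The solver writes the solution into the result vector x.
        var x = Vector<double>.Build.Dense(A.RowCount);
        new BiCgStab().Solve(A, b, x, iterator, new UnitPreconditioner<double>());

        Console.WriteLine(x);
    }
}
```

The unit preconditioner stands in where the documentation above says a solver runs without a preconditioner; swapping in a real preconditioner only changes that last argument.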
[The setup loaders accept an assembly, a type from an assembly, or default to the Math.NET Numerics assembly itself, with options to ignore types that fail to load and to exclude specific types. A unit preconditioner is documented that does nothing and is only used when a solver runs without a real preconditioner; its Initialize requires a square matrix and its Approximate checks that the vectors match the matrix dimensions. The storage layer follows: dense and mutability flags, unchecked At getters and setters (flagged as not thread safe), equality and hash-code members, and the compressed sparse row (CSR) matrix storage with its RowPointers, ColumnIndices and Values arrays, the non-zero count, helpers to find or delete an entry, and the growth rule for the storage arrays.]
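As a small illustration of the sparse storage described above, the sketch below builds a sparse matrix from (row, column, value) triples and walks only its stored entries. It assumes the Math.NET Numerics 3.x builder API and the Zeros enumeration documented earlier; the indices and values are made up.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

static class SparseStorageSketch
{
    static void Main()
    {
        // Build a 4x4 sparse matrix from (row, column, value) triples;
        // internally the non-zero values are held in compressed sparse row (CSR) form.
        var m = Matrix<double>.Build.SparseOfIndexed(4, 4, new[]
        {
            Tuple.Create(0, 0, 2.0),
            Tuple.Create(1, 2, -1.0),
            Tuple.Create(3, 3, 5.0)
        });

        // Enumerate only the stored non-zero entries; Zeros.AllowSkip lets sparse
        // storage skip the zeros, as described above.
        foreach (var entry in m.EnumerateIndexed(Zeros.AllowSkip))
        {
            Console.WriteLine("({0},{1}) = {2}", entry.Item1, entry.Item2, entry.Item3);
        }
    }
}
```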
[Sparse and dense vector storage are documented next (index and value arrays, non-zero counts, unchecked element access, equality and hashing), followed by the generic Vector&lt;T&gt; class for double, single and their complex counterparts. Its members include the Zero and One constants; negation and complex conjugation; addition, subtraction, multiplication and division with scalars and with other vectors; dot and conjugate-dot products; outer products; canonical modulus and remainder against a scalar in both directions; and the pointwise multiply, divide, power, modulus, remainder, exponential and logarithm operations. Each operation has an internal form that writes into a result vector and a public form that validates operand sizes.]
[The Vector&lt;T&gt; entries continue with the pointwise remainder variants, the internal Map helpers that apply unary or binary functions either in place or into a newly created result, and the pointwise functions Exp, Log, Abs, Acos, Asin, Atan, Atan2 (against another vector), Ceiling, Cos, Cosh, Floor, Log10, Round, Sign, Sin, Sinh, Sqrt, Tan and Tanh. Also covered: pointwise minimum, maximum, absolute minimum and absolute maximum against a scalar or another vector; the L1, L2, infinity and general p-norms plus p-norm normalization; minimum and maximum values and indices (absolute and signed); the sum and the sum of magnitudes; equality, hashing, cloning and enumerators; the ToString family (a type-and-shape header and column-by-column content with configurable entry count, line width, format string and format provider); and the Vector base-class constructor, raw Storage property, Count, and the range-checked and unchecked indexers.]
[Further Vector members: the unchecked At setter; Clear and clearing of a sub-vector; CoerceZero by threshold or predicate; Clone; SetValues; CopyTo; sub-vector extraction and sub-vector copies; ToArray and the AsArray view of the internal array (when one exists); conversion to a single-column or single-row matrix; the enumeration families over all values, over indexed values, and over non-zero values only; the Map, indexed Map, two-vector Map and the fold, find, exists and for-all helpers, which may skip zero entries on sparse storage unless forced; and the operator overloads for negation, addition, subtraction, scalar multiplication, the dot product, division, and pointwise modulus and remainder.]
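Most of the Vector members summarized above correspond directly to instance methods. A short sketch, assuming the Math.NET Numerics 3.x Vector&lt;double&gt; API:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

static class VectorOpsSketch
{
    static void Main()
    {
        var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
        var v = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

        double dot = u.DotProduct(v);           // sum of u[i] * v[i]
        var hadamard = u.PointwiseMultiply(v);  // element-wise product
        var outer = u.OuterProduct(v);          // 3x3 matrix M[i,j] = u[i] * v[j]
        double norm = u.L2Norm();               // Euclidean norm
        var unit = u.Normalize(2.0);            // unit vector with respect to the 2-norm

        Console.WriteLine("dot = {0}, |u| = {1}", dot, norm);
        Console.WriteLine(hadamard);
        Console.WriteLine(outer);
        Console.WriteLine(unit);
    }
}
```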
+ If and are not the same size. + If is . + + + + Computes the sqrt of a vector pointwise + + The input vector + + + + + Computes the exponential of a vector pointwise + + The input vector + + + + + Computes the log of a vector pointwise + + The input vector + + + + + Computes the log10 of a vector pointwise + + The input vector + + + + + Computes the sin of a vector pointwise + + The input vector + + + + + Computes the cos of a vector pointwise + + The input vector + + + + + Computes the tan of a vector pointwise + + The input vector + + + + + Computes the asin of a vector pointwise + + The input vector + + + + + Computes the acos of a vector pointwise + + The input vector + + + + + Computes the atan of a vector pointwise + + The input vector + + + + + Computes the sinh of a vector pointwise + + The input vector + + + + + Computes the cosh of a vector pointwise + + The input vector + + + + + Computes the tanh of a vector pointwise + + The input vector + + + + + Computes the absolute value of a vector pointwise + + The input vector + + + + + Computes the floor of a vector pointwise + + The input vector + + + + + Computes the ceiling of a vector pointwise + + The input vector + + + + + Computes the rounded value of a vector pointwise + + The input vector + + + + + Converts a vector to single precision. + + + + + Converts a vector to double precision. + + + + + Converts a vector to single precision complex numbers. + + + + + Converts a vector to double precision complex numbers. + + + + + Gets a single precision complex vector with the real parts from the given vector. + + + + + Gets a double precision complex vector with the real parts from the given vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response vector Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response matrix Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. 
+ Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. 
+ Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor (independent) + Response (dependent) + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor-Response samples as tuples + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response matrix Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Weighted Linear Regression using normal equations. + + List of sample vectors (predictor) together with their response. + List of weights, one for each sample. + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Locally-Weighted Linear Regression using normal equations. + + + + + Locally-Weighted Linear Regression using normal equations. 
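Editor's note: a hedged sketch of the least-squares routines documented above, via `Fit.Line` and `MultipleRegression` from MathNet.Numerics 3.x. The `MultipleRegression.QR(Matrix, Vector)` overload is assumed from that package's public API, and the sample data is made up.

```csharp
// Illustrative sketch: simple and general least-squares fits.
using System;
using MathNet.Numerics;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearRegression;

static class RegressionSketch
{
    static void Main()
    {
        // Line fit y = a + b*x; returns (intercept, slope).
        double[] x = { 0.0, 1.0, 2.0, 3.0 };
        double[] y = { 1.1, 2.9, 5.2, 6.8 };
        Tuple<double, double> line = Fit.Line(x, y);
        Console.WriteLine("intercept {0}, slope {1}", line.Item1, line.Item2);

        // General linear model X*beta ~= Y, solved with the QR variant
        // (more numerically stable than the normal equations, as noted above).
        Matrix<double> X = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 1.0, 0.0 },
            { 1.0, 1.0 },
            { 1.0, 2.0 },
            { 1.0, 3.0 }
        });
        Vector<double> Y = Vector<double>.Build.Dense(y);
        Vector<double> beta = MultipleRegression.QR(X, Y);
        Console.WriteLine(beta);
    }
}
```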
+ + + + + First Order AB method(same as Forward Euler) + + Initial value + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Second Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Third Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Fourth Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + ODE Solver Algorithms + + + + + Second Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Second Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Class to represent a permutation for a subset of the natural numbers. + + + + + Entry _indices[i] represents the location to which i is permuted to. + + + + + Initializes a new instance of the Permutation class. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + + + + Gets the number of elements this permutation is over. + + + + + Computes where permutes too. + + The index to permute from. + The index which is permuted to. + + + + Computes the inverse of the permutation. + + The inverse of the permutation. + + + + Construct an array from a sequence of inversions. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + The set of inversions to construct the permutation from. + A permutation generated from a sequence of inversions. + + + + Construct a sequence of inversions from the permutation. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + A sequence of inversions. + + + + Checks whether the array represents a proper permutation. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + True if represents a proper permutation, false otherwise. + + + + Utilities for working with floating point numbers. + + + + Useful links: + + + http://docs.sun.com/source/806-3568/ncg_goldberg.html#689 - What every computer scientist should know about floating-point arithmetic + + + http://en.wikipedia.org/wiki/Machine_epsilon - Gives the definition of machine epsilon + + + + + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. 
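Editor's note: a hedged sketch of the fourth-order Runge-Kutta solver and the Permutation type described above. The `(y0, start, end, N, f)` parameter order follows the documentation, and `f(t, y)` ordering is assumed; names and values are illustrative only.

```csharp
// Illustrative sketch: solving dy/dt = -y and working with a Permutation.
using System;
using MathNet.Numerics;
using MathNet.Numerics.OdeSolvers;

static class OdeAndPermutationSketch
{
    static void Main()
    {
        // y(t) = exp(-t); the last entry should be close to exp(-5) ~ 0.0067.
        Func<double, double, double> f = (t, y) => -y;
        double[] approx = RungeKutta.FourthOrder(1.0, 0.0, 5.0, 100, f);
        Console.WriteLine(approx[approx.Length - 1]);

        // indices[i] is where element i is permuted to: 0->1, 1->2, 2->0.
        var p = new Permutation(new[] { 1, 2, 0 });
        Console.WriteLine(p[0]);            // 1
        Console.WriteLine(p.Inverse()[1]);  // 0: the inverse sends 1 back to 0
    }
}
```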
+ The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The relative accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The maximum error in terms of Units in Last Place (ulps), i.e. the maximum number of decimals that may be different. Must be 1 or larger. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. 
+ + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. 
+ + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + The number of binary digits used to represent the binary number for a double precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + The number of binary digits used to represent the binary number for a single precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. 
Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Actual double precision machine epsilon, the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + On a standard machine this is equivalent to `DoublePrecision`. + + + + + Actual double precision machine epsilon, the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + On a standard machine this is equivalent to `PositiveDoublePrecision`. + + + + + The number of significant decimal places of double-precision floating numbers (64 bit). + + + + + The number of significant decimal places of single-precision floating numbers (32 bit). + + + + + Value representing 10 * 2^(-53) = 1.11022302462516E-15 + + + + + Value representing 10 * 2^(-24) = 5.96046447753906E-07 + + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the number divided by it's magnitude, effectively returning a number between -10 and 10. + + The value. + The value of the number. + + + + Returns a 'directional' long value. This is a long value which acts the same as a double, + e.g. a negative double value will return a negative double value starting at 0 and going + more negative as the double value gets more negative. + + The input double value. + A long value which is roughly the equivalent of the double value. + + + + Returns a 'directional' int value. This is a int value which acts the same as a float, + e.g. a negative float value will return a negative int value starting at 0 and going + more negative as the float value gets more negative. + + The input float value. + An int value which is roughly the equivalent of the double value. + + + + Increments a floating point number to the next bigger number representable by the data type. + + The value which needs to be incremented. + How many times the number should be incremented. + + The incrementation step length depends on the provided value. + Increment(double.MaxValue) will return positive infinity. + + The next larger floating point value. + + + + Decrements a floating point number to the next smaller number representable by the data type. + + The value which should be decremented. + How many times the number should be decremented. + + The decrementation step length depends on the provided value. + Decrement(double.MinValue) will return negative infinity. + + The next smaller floating point value. + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + Thrown if is smaller than zero. 
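Editor's note: a minimal sketch of the machine-precision constants and helpers described above, assuming the extension-method spellings exposed by `MathNet.Numerics.Precision` in 3.x (`Increment`, `EpsilonOf`, `CoerceZero`); treat those names as assumptions if your version differs.

```csharp
// Illustrative sketch: precision constants and spacing helpers.
using System;
using MathNet.Numerics;

static class PrecisionConstantsSketch
{
    static void Main()
    {
        Console.WriteLine(Precision.DoublePrecision);   // ~1.11e-16 (2^-53)
        Console.WriteLine(Precision.MachineEpsilon);    // measured at runtime

        double next = 1.0.Increment(1);                 // next representable double above 1.0
        double eps  = 1000.0.EpsilonOf();               // roughly the spacing of doubles near 1000
        double tidy = 1e-30.CoerceZero(1e-15);          // forced to exactly 0.0

        Console.WriteLine("{0} {1} {2}", next, eps, tidy);
    }
}
```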
+ + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. 
See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. 
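Editor's note: a hedged sketch of the AlmostEqual family described above, using the absolute-error, relative-error and decimal-places overloads from `MathNet.Numerics.Precision`; overload names follow the 3.x package and the values are illustrative.

```csharp
// Illustrative sketch: tolerant floating-point comparison.
using System;
using MathNet.Numerics;

static class AlmostEqualSketch
{
    static void Main()
    {
        double a = 1.0;
        double b = 1.0 + 1e-12;

        Console.WriteLine(a.AlmostEqual(b, 1e-10));         // true: |a - b| < 1e-10
        Console.WriteLine(a.AlmostEqualRelative(b, 1e-10)); // true: relative error < 1e-10
        Console.WriteLine(a.AlmostEqual(b, 10));            // true to 10 decimal places
        Console.WriteLine(a.AlmostEqual(2.0, 1e-10));       // false
    }
}
```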
+ + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + Thrown if is smaller than zero. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. 
If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + + + Determines the 'number' of floating point numbers between two values (i.e. the number of discrete steps + between the two numbers) and then checks if that is within the specified tolerance. So if a tolerance + of 1 is passed then the result will be true only if the two numbers have the same binary representation + OR if they are two adjacent numbers that only differ by one step. + + + The comparison method used is explained in http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm . 
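Editor's note: a sketch of the binary-representation (ulps) comparison described above. The method name `AlmostEqualNumbersBetween` is an assumption based on the MathNet.Numerics 3.x Precision API; the stripped documentation does not preserve the member name.

```csharp
// Illustrative sketch: equality within a number of representable steps.
using System;
using MathNet.Numerics;

static class UlpsComparisonSketch
{
    static void Main()
    {
        double a = 1.0;
        double b = a.Increment(1);   // the very next representable double

        // Equal within one representable step, but not bitwise equal.
        Console.WriteLine(a.AlmostEqualNumbersBetween(b, 1));   // true
        Console.WriteLine(a == b);                              // false
    }
}
```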
The article + at http://www.extremeoptimization.com/resources/Articles/FPDotNetConceptsAndFormats.aspx explains how to transform the C code to + .NET enabled code without using pointers and unsafe code. + + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two floats and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. 
+ + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two vectors and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Support Interface for Precision Operations (like AlmostEquals). + + Type of the implementing class. + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + A norm of this value. + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + The value to compare with. + A norm of the difference between this and the other value. + + + + Consistency vs. performance trade-off between runs on different machines. 
+ + + + Consistent on the same CPU only (maximum performance) + + + Consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility) + + + Consistent on Intel CPUs supporting SSE2 or later + + + Consistent on Intel CPUs supporting SSE4.2 or later + + + Consistent on Intel CPUs supporting AVX or later + + + Consistent on Intel CPUs supporting AVX2 or later + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsFFTProvider" environment variable, + or fall back to the best provider. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + How to transpose a matrix. + + + + + Don't transpose a matrix. + + + + + Transpose a matrix. + + + + + Conjugate transpose a complex matrix. + + If a conjugate transpose is used with a real matrix, then the matrix is just transposed. + + + + Types of matrix norms. + + + + + The 1-norm. + + + + + The Frobenius norm. + + + + + The infinity norm. + + + + + The largest absolute value norm. + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + Supported data types are Double, Single, Complex, and Complex32. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiply elements of vectors or matrices. + + The array x. 
+ The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. 
On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the full QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by QR factor. This is only used for the managed provider and can be + null for the native provider. The native provider uses the Q portion stored in the R matrix. + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + On entry the B matrix; on exit the X matrix. + The number of columns of B. + On exit, the solution matrix. + Rows must be greater or equal to columns. + The type of QR factorization to perform. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
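Editor's note: the LU and Cholesky routines documented above are normally reached through the high-level `Matrix<double>` factorization API rather than the provider interface directly. A minimal sketch under that assumption; the matrix and vector values are made up.

```csharp
// Illustrative sketch: solving A*x = b via LU and Cholesky factorizations.
using System;
using MathNet.Numerics.LinearAlgebra;

static class FactorizationSketch
{
    static void Main()
    {
        var A = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 4.0, 1.0 },
            { 1.0, 3.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

        var xLu   = A.LU().Solve(b);         // general square system (GETRF/GETRS)
        var xChol = A.Cholesky().Solve(b);   // A is symmetric positive definite (POTRF/POTRS)

        Console.WriteLine(xLu);
        Console.WriteLine(xChol);
    }
}
```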
+ + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsLAProvider" environment variable, + or fall back to the best provider. + + + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. 
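Editor's note: a hedged sketch of selecting the provider that backs the routines above, using the `Control` class from MathNet.Numerics 3.x (`TryUseNativeMKL`, `UseManaged`, `LinearAlgebraProvider`); the "MathNetNumericsLAProvider" environment variable mentioned above can force a specific provider as well.

```csharp
// Illustrative sketch: falling back to the managed provider when MKL is absent.
using System;
using MathNet.Numerics;

static class ProviderSelectionSketch
{
    static void Main()
    {
        // Use the native MKL provider if it is deployed, otherwise stay managed.
        if (!Control.TryUseNativeMKL())
        {
            Control.UseManaged();
        }

        Console.WriteLine(Control.LinearAlgebraProvider);   // report the active provider
    }
}
```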
+ There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. 
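Editor's note: the GEMM-style routine above computes c = alpha*op(a)*op(b) + beta*c; at the matrix level the same result is usually written with operators, and the LU routines (GETRF/GETRS/GETRI) sit behind the LU decomposition and Inverse calls. A short sketch using the high-level 3.x API:

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.Random(3, 4);
    var B = Matrix<double>.Build.Random(4, 2);
    var C = Matrix<double>.Build.Random(3, 2);
    double alpha = 2.0, beta = 0.5;

    // Simple product (GEMM with alpha = 1, beta = 0, no transposes):
    var product = A * B;

    // Full GEMM semantics, expressed with operators:
    var updated = alpha * (A * B) + beta * C;

    // LU-based solve and inverse (GETRF/GETRS and GETRF/GETRI):
    var square = Matrix<double>.Build.Random(4, 4);
    var rhs = Matrix<double>.Build.Random(4, 2);
    var X = square.LU().Solve(rhs);
    var inverse = square.Inverse();
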
+ + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + The B matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. 
+ The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
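Editor's note: the SVD routine described above also serves as a robust least-squares solver for overdetermined systems; at the matrix level this is exposed through the Svd decomposition object. A hedged sketch, assuming the usual 3.x decomposition API:

    using MathNet.Numerics.LinearAlgebra;

    // Overdetermined system (more rows than columns) solved in the least-squares
    // sense via the singular value decomposition (GESVD under the hood).
    var A = Matrix<double>.Build.Random(6, 3);
    var b = Vector<double>.Build.Random(6);

    var svd = A.Svd(true);        // true: also compute the U and VT singular vectors
    var x = svd.Solve(b);         // least-squares solution of A*x ≈ b
    var s = svd.S;                // the singular values of A
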
+ + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + The requested of the matrix. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. 
+ + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
+ The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
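Editor's note: for the eigenvalue routine described above, the order*order arrays for the eigenvectors and the block-diagonal matrix correspond to the EigenVectors and D members of the high-level decomposition. A hedged sketch, assuming the 3.x Evd API:

    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    // Symmetric input => real eigenvalues; symmetry is detected when not specified.
    var A = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 2.0, 1.0 },
        { 1.0, 2.0 }
    });

    var evd = A.Evd();
    Vector<Complex> eigenvalues = evd.EigenValues;  // λ values
    Matrix<double> eigenvectors = evd.EigenVectors; // columns hold the eigenvectors
    Matrix<double> diagonal = evd.D;                // block diagonal eigenvalue matrix
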
+ + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. 
Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. 
+ + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. 
+ + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. 
+ The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. 
+ The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. 
The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + Uses and uses the value of + to set whether the instance is thread safe. + + + + Construct a new random number generator with random seed. + + The to use. + Uses the value of to set whether the instance is thread safe. + + + + Construct a new random number generator with random seed. + + Uses + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The to use. + if set to true , the class is thread safe. + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. 
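Editor's note: the random sources documented here share one construction pattern: a seed (zero is remapped to one for the multiplicative generators) plus an optional thread-safety flag. A minimal sketch, assuming the class names of the MathNet.Numerics.Random namespace in the packaged 3.x assembly:

    using MathNet.Numerics.Random;

    // System.Random wrapped in the thread-safety layer of RandomSource.
    var system = new SystemRandomSource(42, true);   // seed, threadSafe

    // Multiplicative congruential generators; a zero seed would be remapped to one.
    var mcg31 = new Mcg31m1(42);
    var mcg59 = new Mcg59(42, false);                // seed, threadSafe

    double u1 = system.NextDouble();   // uniform in [0, 1)
    double u2 = mcg31.NextDouble();
    double u3 = mcg59.NextDouble();
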
+ + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Random number generator using Mersenne Twister 19937 algorithm. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + Uses the value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A 32-bit combined multiple recursive generator with 2 components of order 3. + + Based off of P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research, 44, 5 (1996), 816--822. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. 
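Editor's note: the Mersenne Twister is the workhorse generator here; most callers use its thread-safe Default instance or the batch array helpers rather than drawing single values in a loop. A short sketch, assuming the 3.x class names:

    using MathNet.Numerics.Random;

    // Shared, thread-safe default instance.
    double u = MersenneTwister.Default.NextDouble();

    // Dedicated, seeded instance for reproducible sequences.
    var mt = new MersenneTwister(19937);
    double[] batch = mt.NextDoubles(1000);   // array of uniform [0, 1) samples

    // Combined multiple recursive generator (the L'Ecuyer MRG variant described above).
    var mrg = new Mrg32k3a(42);
    double v = mrg.NextDouble();
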
+ + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Represents a Parallel Additive Lagged Fibonacci pseudo-random number generator. + + + The type bases upon the implementation in the + Boost Random Number Library. + It uses the modulus 232 and by default the "lags" 418 and 1279. Some popular pairs are presented on + Wikipedia - Lagged Fibonacci generator. + + + + + Default value for the ShortLag + + + + + Default value for the LongLag + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The ShortLag value + TheLongLag value + + + + Gets the short lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Gets the long lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Stores an array of random numbers + + + + + Stores an index for the random number array element that will be accessed next. + + + + + Fills the array with new unsigned random numbers. + + + Generated random numbers are 32-bit unsigned integers greater than or equal to 0 + and less than or equal to . + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + This class implements extension methods for the System.Random class. The extension methods generate + pseudo-random distributed numbers for types other than double and int32. + + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. 
+ + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random bytes. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers greater than or equal to zero and less than . + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers within the specified range. + + The random number generator. + The array to fill with random values. + Lower bound, inclusive. + Upper bound, exclusive. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative random number less than . + + The random number generator. + + A 64-bit signed integer greater than or equal to 0, and less than ; that is, + the range of return values includes 0 but not . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int32 range. + + The random number generator. + + A 32-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int64 range. + + The random number generator. + + A 64-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative decimal floating point random number less than 1.0. + + The random number generator. + + A decimal floating point number greater than or equal to 0.0, and less than 1.0; that is, + the range of return values includes 0.0 but not 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random boolean. + + The random number generator. 
+ + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Provides a time-dependent seed value, matching the default behavior of System.Random. + WARNING: There is no randomness in this seed and quick repeated calls can cause + the same seed value. Do not use for cryptography! + + + + + Provides a seed based on time and unique GUIDs. + WARNING: There is only low randomness in this seed, but at least quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Provides a seed based on an internal random number generator (crypto if available), time and unique GUIDs. + WARNING: There is only medium randomness in this seed, but quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Base class for random number generators. This class introduces a layer between + and the Math.Net Numerics random number generators to provide thread safety. + When used directly it use the System.Random as random number source. + + + + + Initializes a new instance of the class using + the value of to set whether + the instance is thread safe or not. + + + + + Initializes a new instance of the class. + + if set to true , the class is thread safe. + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The array to fill with random values. + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The size of the array to fill. + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than . + + + + + Returns a random number less then a specified maximum. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + A 32-bit signed integer less than . + is zero or negative. + + + + Returns a random number within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + A 32-bit signed integer greater than or equal to and less than ; that is, the range of return values includes but not . If equals , is returned. + + is greater than . + + + + Fills an array with random 32-bit signed integers greater than or equal to zero and less than . + + The array to fill with random values. + + + + Returns an array with random 32-bit signed integers greater than or equal to zero and less than . + + The size of the array to fill. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an array with random 32-bit signed integers within the specified range. 
+ + The size of the array to fill. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an infinite sequence of random 32-bit signed integers greater than or equal to zero and less than . + + + + + Returns an infinite sequence of random numbers within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Fills the elements of a specified array of bytes with random numbers. + + An array of bytes to contain random numbers. + is null. + + + + Returns a random number between 0.0 and 1.0. + + A double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than 2147483647 (). + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random N-bit signed integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 32 (not verified). + + + + + Returns a random N-bit signed long integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 64 (not verified). + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + + + + Construct a new random number generator with random seed. + + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The seed value. + + + + Construct a new random number generator with random seed. + + The seed value. + if set to true , the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fill an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. 
+ + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 1982 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: + An efficient and portable pseudo-random number generator". Applied Statistics 31 (1982) 188-190 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 2006 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers". + Computational Statistics & Data Analysis 51:3 (2006) 1614-1622 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. 
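+ A minimal sketch of the 1982 Wichmann-Hill (AS 183) recurrence summarized above, assuming the classic multipliers 171/172/170 and moduli 30269/30307/30323; WichmannHill1982Sketch is a placeholder and the seeding of the three components is simplified compared to the library:
+     using System;
+
+     sealed class WichmannHill1982Sketch
+     {
+         int _s1, _s2, _s3;
+
+         public WichmannHill1982Sketch(int seed)
+         {
+             // A zero component would stay stuck at zero, so map it to one.
+             _s1 = (seed & 0x7FFFFFFF) % 30269;
+             if (_s1 == 0) _s1 = 1;
+             _s2 = 1;
+             _s3 = 1;
+         }
+
+         public double NextDouble()
+         {
+             // Three small multiplicative congruential generators are combined;
+             // the fractional part of the sum is the uniform sample in [0, 1).
+             _s1 = 171 * _s1 % 30269;
+             _s2 = 172 * _s2 % 30307;
+             _s3 = 170 * _s3 % 30323;
+             double u = _s1 / 30269.0 + _s2 / 30307.0 + _s3 / 30323.0;
+             return u - Math.Floor(u);
+         }
+     }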
+ + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Implements a multiply-with-carry Xorshift pseudo random number generator (RNG) specified in Marsaglia, George. (2003). Xorshift RNGs. + Xn = a * Xn−3 + c mod 2^32 + http://www.jstatsoft.org/v08/i14/paper + + + + + The default value for X1. + + + + + The default value for X2. + + + + + The default value for the multiplier. + + + + + The default value for the carry over. + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Seed or last but three unsigned random number. + + + + + Last but two unsigned random number. + + + + + Last but one unsigned random number. + + + + + The value of the carry over. + + + + + The multiplier. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Note: must be less than . + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . 
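+ The multiply-with-carry recurrence Xn = a * Xn-3 + c mod 2^32 quoted above can be sketched in a few lines; MwcXorshiftSketch is a placeholder, the constants are the defaults listed in this documentation, and the exact placement of the seed within the three state slots is simplified:
+     sealed class MwcXorshiftSketch
+     {
+         const ulong A = 916905990;      // multiplier a (default listed above)
+         ulong _c = 13579;               // carry c (default listed above)
+         ulong _x1 = 77465321;           // X(n-3), default X1
+         ulong _x2 = 362436069;          // X(n-2), default X2
+         ulong _x3;                      // X(n-1), taken from the seed here
+
+         public MwcXorshiftSketch(uint seed)
+         {
+             _x3 = seed == 0 ? 1u : seed;   // a zero seed is mapped to one
+         }
+
+         public uint NextUInt32()
+         {
+             ulong t = A * _x1 + _c;        // a * X(n-3) + carry
+             _c = t >> 32;                  // new carry: the high 32 bits
+             ulong x = t & 0xFFFFFFFFUL;    // new sample: the low 32 bits (mod 2^32)
+             _x1 = _x2; _x2 = _x3; _x3 = x;
+             return (uint)x;
+         }
+
+         // Uniform double in [0, 1): scale by 2^-32.
+         public double NextDouble()
+         {
+             return NextUInt32() * (1.0 / 4294967296.0);
+         }
+     }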
+ + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Bisection root-finding algorithm. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy for both the root and the function value at the root. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Algorithm by by Brent, Van Wijngaarden, Dekker et al. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. 
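+ A compact sketch of the plain bisection search described above: bracket the root with a sign change, then halve the interval until the bracket is smaller than the requested accuracy. BisectionSketch is a placeholder, and the automatic bound expansion described above is omitted:
+     using System;
+
+     static class BisectionSketch
+     {
+         // Returns true and the root if f changes sign on [lower, upper].
+         public static bool TryFindRoot(Func<double, double> f, double lower, double upper,
+                                        double accuracy, int maxIterations, out double root)
+         {
+             double fLower = f(lower);
+             double fUpper = f(upper);
+             root = double.NaN;
+             if (Math.Sign(fLower) == Math.Sign(fUpper)) return false;   // no bracketing
+
+             for (int i = 0; i < maxIterations; i++)
+             {
+                 double mid = 0.5 * (lower + upper);
+                 double fMid = f(mid);
+                 if (fMid == 0.0 || 0.5 * (upper - lower) < accuracy)
+                 {
+                     root = mid;
+                     return true;
+                 }
+                 if (Math.Sign(fMid) == Math.Sign(fLower)) { lower = mid; fLower = fMid; }
+                 else { upper = mid; }
+             }
+             return false;
+         }
+     }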
+ Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Helper method useful for preventing rounding errors. + a*sign(b) + + + + Algorithm by Broyden. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Helper method to calculate an approximation of the Jacobian. + + The function. + The argument (initial guess). + The result (of initial guess). + + + + Finds roots to the cubic equation x^3 + a2*x^2 + a1*x + a0 = 0 + Implements the cubic formula in http://mathworld.wolfram.com/CubicFormula.html + + + + + Q and R are transformed variables. + + + + + n^(1/3) - work around a negative double raised to (1/3) + + + + + Find all real-valued roots of the cubic equation a0 + a1*x + a2*x^2 + x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Pure Newton-Raphson root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. 
+ The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Robust Newton-Raphson root-finding algorithm that falls back to bisection when overshooting or converging too slow, or to subdivision on lacking bracketing. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Default 20. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Example: 20. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Pure Secant root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. 
+ The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false + + + Detect a range containing at least one root. + The function to detect roots from. + Lower value of the range. + Upper value of the range + The growing factor of research. Usually 1.6. + Maximum number of iterations. Usually 50. + True if the bracketing operation succeeded, false otherwise. + This iterative methods stops when two values with opposite signs are found. + + + + Sorting algorithms for single, tuple and triple lists. + + + + + Sort a list of keys, in place using the quick sort algorithm using the quick sort algorithm. + + The type of elements in the key list. + List to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a range of a list of keys, in place using the quick sort algorithm. + + The type of element in the list. + List to sort. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the primary list. + The type of elements in the secondary list. + List to sort. + List to sort on duplicate primary items, and permute the same way as the key list. + Comparison, defining the primary sort order. + Comparison, defining the secondary sort order. + + + + Recursive implementation for an in place quick sort on a list. + + The type of the list on which the quick sort is performed. 
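+ The keyed Sort overloads documented here follow the same idea as the standard Array.Sort(keys, items) overload: the key array is sorted and the parallel item array is permuted identically. A minimal usage sketch (KeyedSortExample is a placeholder; the custom Comparison and index/length range handling described above are not shown):
+     using System;
+
+     class KeyedSortExample
+     {
+         static void Main()
+         {
+             double[] keys  = { 3.0, 1.0, 2.0 };
+             string[] items = { "c", "a", "b" };
+
+             // Sort the keys and permute the parallel item array the same way.
+             Array.Sort(keys, items);
+
+             Console.WriteLine(string.Join(", ", items));   // prints: a, b, c
+         }
+     }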
+ The list which is sorted using quick sort. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on a list while reordering one other list accordingly. + + The type of the list on which the quick sort is performed. + The type of the list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on one list while reordering two other lists accordingly. + + The type of the list on which the quick sort is performed. + The type of the first list which is automatically reordered accordingly. + The type of the second list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The first list which is automatically reordered accordingly. + The second list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on the primary and then by the secondary list while reordering one secondary list accordingly. + + The type of the primary list. + The type of the secondary list. + The list which is sorted using quick sort. + The list which is sorted secondarily (on primary duplicates) and automatically reordered accordingly. + The method with which to compare two elements of the primary list. + The method with which to compare two elements of the secondary list. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Performs an in place swap of two elements in a list. + + The type of elements stored in the list. + The list in which the elements are stored. + The index of the first element of the swap. + The index of the second element of the swap. + + + + This partial implementation of the SpecialFunctions class contains all methods related to the error function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the harmonic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the logistic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + + + Computes the logarithm of the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The logarithm of the Euler Beta function evaluated at z,w. + If or are not positive. + + + + Computes the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The Euler Beta function evaluated at z,w. + If or are not positive. + + + + Returns the lower incomplete (unregularized) beta function + B(a,b,x) = int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. 
+ The lower incomplete (unregularized) beta function. + + + + Returns the regularized lower incomplete beta function + I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The regularized lower incomplete beta function. + + + + ************************************** + COEFFICIENTS FOR METHOD ErfImp * + ************************************** + + Polynomial coefficients for a numerator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for adenominator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. 
+ + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + + ************************************** + COEFFICIENTS FOR METHOD ErfInvImp * + ************************************** + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. 
+ returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. 
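+ To make the overflow remark above concrete: n! itself overflows a double beyond n = 170, while ln(n!) stays small, so adding log-factorials and exponentiating once is the safer route. A self-contained sketch (BinomialSketch is a placeholder; the library computes the log-factorial via the log-gamma function rather than by direct summation):
+     using System;
+
+     static class BinomialSketch
+     {
+         // ln(n!) by direct summation; fine for illustration.
+         static double LnFactorial(int n)
+         {
+             double sum = 0.0;
+             for (int i = 2; i <= n; i++) sum += Math.Log(i);
+             return sum;
+         }
+
+         // n choose k via ln(n!) - ln(k!) - ln((n-k)!), rounded back to an integer value.
+         public static double Binomial(int n, int k)
+         {
+             if (k < 0 || k > n) return 0.0;
+             return Math.Round(Math.Exp(LnFactorial(n) - LnFactorial(k) - LnFactorial(n - k)));
+         }
+     }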
+ + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. 
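+ For the logistic and logit entries documented above, a two-line sketch (LogisticSketch is a placeholder; logit is only defined for arguments strictly between 0 and 1):
+     using System;
+
+     static class LogisticSketch
+     {
+         // Logistic (sigmoid): maps any real p into (0, 1).
+         public static double Logistic(double p) => 1.0 / (1.0 + Math.Exp(-p));
+
+         // Logit: inverse of the logistic, defined for 0 < p < 1.
+         public static double Logit(double p) => Math.Log(p / (1.0 - p));
+     }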
+ + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of first kind, + order 1 of the argument. +

+ The function is defined as i1(x) = -i j1( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of the second kind + of order 0 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 0 of the argument. + + The value to compute the bessel function of. + + + + Returns the modified Bessel function of the second kind + of order 1 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 1 of the argument. +

+ k1e(x) = exp(x) * k1(x). +

+ The value to compute the bessel function of. + +
+ + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = sum_{i=0..N-1} coef[i] * T_i(x/2)
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must + have been transformed to x -> 2(2x - b - a)/(b-a) before + entering the routine. This maps x from (a, b) to (-1, 1), + over which the Chebyshev polynomials are defined. +

+ If the coefficients are for the inverted interval, in + which (a, b) is mapped to (1/b, 1/a), the transformation + required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + this becomes x -> 4a/x - 1. +

+ SPEED: +

+ Taking advantage of the recurrence properties of the + Chebyshev polynomials, the routine requires one more + addition per loop than evaluating a nested polynomial of + the same degree. +

+ The coefficients of the polynomial. + Argument to the polynomial. + + Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs +

+ Marked as Deprecated in + http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html + + + +

+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification. + + The no. of terms in the sequence. + The coefficients of the Chebyshev series, length n+1. + The value at which the series is to be evaluated. + + ORIGINAL AUTHOR: + Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics, University of Paisley; High St., PAISLEY, SCOTLAND + REFERENCES: + "An error analysis of the modified Clenshaw method for evaluating Chebyshev and Fourier series" + J. Oliver, J.I.M.A., vol. 20, 1977, pp379-391 + +
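+ A sketch of the plain Chebyshev evaluation described above, using the standard three-term Clenshaw recurrence with the highest-order coefficient first and the zero-order term last (ChebyshevSketch is a placeholder; the Reinsch-modified variant referenced above adds error-control steps that are not shown):
+     static class ChebyshevSketch
+     {
+         // Evaluates a Chebyshev series sum of c_i * T_i(x/2), where the input
+         // array stores the highest-order coefficient first and the zero-order
+         // coefficient last, as described above.
+         public static double Evaluate(double x, double[] coef)
+         {
+             double b0 = coef[0], b1 = 0.0, b2 = 0.0;
+             for (int i = 1; i < coef.Length; i++)
+             {
+                 b2 = b1;
+                 b1 = b0;
+                 b0 = x * b1 - b2 + coef[i];   // Clenshaw step at argument x/2
+             }
+             return 0.5 * (b0 - b2);
+         }
+     }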
+ + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. 
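+ As a quick check of the test functions above, the two-dimensional Rosenbrock surface is a one-liner and its stated global minimum f(1,1) = 0 is easy to verify (TestFunctionSketch is a placeholder):
+     static class TestFunctionSketch
+     {
+         // Valley-shaped Rosenbrock function: (1-x)^2 + 100*(y - x^2)^2.
+         public static double Rosenbrock2D(double x, double y)
+         {
+             return (1 - x) * (1 - x) + 100.0 * (y - x * x) * (y - x * x);
+         }
+     }
+
+     // Rosenbrock2D(1, 1) == 0 (global minimum); Rosenbrock2D(0, 0) == 1.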
+ Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
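+ A sketch of the default R-8 quantile estimate described above, working on a sorted copy rather than in place (QuantileSketch is a placeholder): with h = (N + 1/3) * tau + 1/3, clamp to the first or last value at the extremes and otherwise interpolate linearly between the neighbouring order statistics:
+     using System;
+
+     static class QuantileSketch
+     {
+         // R-8 / SciPy-(1/3,1/3) quantile; assumes data is non-empty.
+         public static double QuantileR8(double[] data, double tau)
+         {
+             var sorted = (double[])data.Clone();
+             Array.Sort(sorted);
+             int n = sorted.Length;
+
+             double h = (n + 1.0 / 3.0) * tau + 1.0 / 3.0;
+             if (h <= 1.0) return sorted[0];        // tau below (2/3)/(N + 1/3): use x1
+             if (h >= n) return sorted[n - 1];      // tau at or above (N - 1/3)/(N + 1/3): use xN
+
+             int k = (int)Math.Floor(h);            // one-based lower order statistic
+             return sorted[k - 1] + (h - k) * (sorted[k] - sorted[k - 1]);
+         }
+     }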
+ Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. 
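For the custom quantile and rank helpers just described, a hedged sketch. The enum members used here (`QuantileDefinition.Excel`, `RankDefinition.Average`) and the `RanksInplace` name are assumptions consistent with the documentation above and should be verified against the shipped assembly; the data is invented.

    using System;
    using MathNet.Numerics.Statistics;

    class QuantileRankDemo
    {
        static void Main()
        {
            double[] data = { 3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0 };

            // Quantile made compatible with a chosen convention (assumed enum member).
            double q75 = Statistics.QuantileCustom(data, 0.75, QuantileDefinition.Excel);

            // Ranks with ties replaced by their mean, the documented default (assumed names).
            double[] ranks = ArrayStatistics.RanksInplace((double[])data.Clone(), RankDefinition.Average);

            Console.WriteLine($"q75={q75}, ranks=[{string.Join(", ", ranks)}]");
        }
    }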
+ + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. 
+ On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. 
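The correlation measures summarized above can be exercised with a couple of short vectors (values invented); a minimal sketch:

    using System;
    using MathNet.Numerics.Statistics;

    class CorrelationDemo
    {
        static void Main()
        {
            double[] a = { 1.0, 2.0, 3.0, 4.0, 5.0 };
            double[] b = { 2.1, 3.9, 6.2, 8.0, 9.9 };

            double pearson = Correlation.Pearson(a, b);    // linear correlation
            double spearman = Correlation.Spearman(a, b);  // rank-based correlation

            Console.WriteLine($"Pearson={pearson:F3}, Spearman={spearman:F3}");
        }
    }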
+ + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. 
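`DescriptiveStatistics` computes the summary properties listed above in a single pass over the data; a sketch that leaves the increased-accuracy flag at its default (sample values invented):

    using System;
    using MathNet.Numerics.Statistics;

    class DescriptiveDemo
    {
        static void Main()
        {
            var data = new[] { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };
            var stats = new DescriptiveStatistics(data);

            Console.WriteLine($"n={stats.Count}, mean={stats.Mean}");
            Console.WriteLine($"sample stddev={stats.StandardDeviation}");   // N-1 normalizer
            Console.WriteLine($"skewness={stats.Skewness}, kurtosis={stats.Kurtosis}");
            Console.WriteLine($"range=[{stats.Minimum}, {stats.Maximum}]");
        }
    }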
+ + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. + + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. 
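The `Histogram`/`Bucket` pair described above bins data into equally sized buckets and widens its bounds when out-of-range points are added; a sketch with an invented bucket count and data:

    using System;
    using MathNet.Numerics.Statistics;

    class HistogramDemo
    {
        static void Main()
        {
            var data = new[] { 0.5, 1.5, 1.7, 2.3, 3.1, 3.3, 3.8, 4.9 };

            // Five equally sized buckets spanning the smallest and largest datapoint.
            var histogram = new Histogram(data, 5);
            histogram.AddData(5.4);  // out of range: the upper bound adapts

            for (int i = 0; i < histogram.BucketCount; i++)
            {
                Bucket bucket = histogram[i];  // copy of the i-th bucket
                Console.WriteLine($"({bucket.LowerBound}, {bucket.UpperBound}]: {bucket.Count}");
            }
        }
    }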
+ + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. + When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. 
+ + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. + + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. 
+ The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. + + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. 
+ The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. 
+ The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. 
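As a sketch of the stateful samplers documented above, the following draws from a standard normal with `MetropolisSampler<double>` using a symmetric random-walk proposal. The constructor argument order (initial sample, log density, symmetric proposal, burn interval) is read from the parameter list above; the seed, proposal width, sample count and the `AcceptanceRate` member name are assumptions to check against the shipped `MathNet.Numerics.Statistics.Mcmc` API.

    using System;
    using MathNet.Numerics.Distributions;
    using MathNet.Numerics.Random;
    using MathNet.Numerics.Statistics;
    using MathNet.Numerics.Statistics.Mcmc;

    class MetropolisDemo
    {
        static void Main()
        {
            var rng = new MersenneTwister(42);
            var target = new Normal(0.0, 1.0);

            // Initial sample, log density, symmetric random-walk proposal, burn interval.
            var sampler = new MetropolisSampler<double>(
                0.1,
                x => target.DensityLn(x),
                x => Normal.Sample(rng, x, 0.5),
                10);

            double[] samples = sampler.Sample(2000);
            Console.WriteLine($"mean~{samples.Mean():F2}, stddev~{samples.StandardDeviation():F2}");
            Console.WriteLine($"acceptance rate={sampler.AcceptanceRate:F2}");
        }
    }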
+ The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. 
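The univariate slice sampler needs only a log density and a scale estimate. A hedged sketch follows, with the constructor argument order (initial sample, log density, burn interval, scale) read from the parameter list above and an invented target distribution:

    using System;
    using MathNet.Numerics.Distributions;
    using MathNet.Numerics.Statistics;
    using MathNet.Numerics.Statistics.Mcmc;

    class SliceDemo
    {
        static void Main()
        {
            var target = new Normal(2.0, 0.5);

            // Initial sample, log density, burn interval, slice scale.
            var sampler = new UnivariateSliceSampler(0.0, x => target.DensityLn(x), 5, 1.0);

            double[] samples = sampler.Sample(1000);
            Console.WriteLine($"mean={samples.Mean():F2} (target 2.0)");
        }
    }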
+ + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. 
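`RunningStatistics` (and the windowed `MovingStatistics` accumulator described just above it) updates its moments incrementally and supports merging two accumulators, which is what makes it suitable for streaming or partitioned data; a short sketch with invented values:

    using System;
    using MathNet.Numerics.Statistics;

    class RunningDemo
    {
        static void Main()
        {
            var left = new RunningStatistics(new[] { 1.0, 2.0, 3.0 });
            var right = new RunningStatistics();
            right.PushRange(new[] { 4.0, 5.0, 6.0 });

            // Combine the two accumulators as if all samples had been pushed into one.
            var combined = RunningStatistics.Combine(left, right);
            Console.WriteLine($"n={combined.Count}, mean={combined.Mean}, stddev={combined.StandardDeviation}");
        }
    }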
+ + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
+ + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. 
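The `SortedArrayStatistics` variants above require the input to be sorted ascending, which lets them avoid the in-place reordering of the unsorted-array helpers; a sketch with invented, pre-sorted data:

    using System;
    using MathNet.Numerics.Statistics;

    class SortedDemo
    {
        static void Main()
        {
            double[] data = { 1.0, 2.0, 2.5, 4.0, 7.0, 9.0 };  // already sorted ascending

            double median = SortedArrayStatistics.Median(data);
            double q90 = SortedArrayStatistics.Quantile(data, 0.9);   // R8 estimate, tau in [0, 1]
            double p25 = SortedArrayStatistics.Percentile(data, 25);  // integer percentile, 0..100

            Console.WriteLine($"median={median}, 90% quantile={q90}, 25th percentile={p25}");
        }
    }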
+ Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. 
[Population Skewness and Kurtosis (type 1, no normalizer) alongside their Bessel-corrected sample counterparts (type 2; NaN below three resp. four entries); combined single-pass estimators MeanVariance, MeanStandardDeviation, SkewnessKurtosis and PopulationSkewnessKurtosis; Covariance (N-1) and PopulationCovariance (N) between two data sets; RootMeanSquare (quadratic mean); and the sample Median and tau-th Quantile, both using the approximately median-unbiased R8 estimator.]
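The covariance pair follows the same sample/population convention, and RootMeanSquare and Median are plain reducers; a short sketch under the same assumptions (x and y are invented):

    using System;
    using MathNet.Numerics.Statistics;

    class CovarianceDemo
    {
        static void Main()
        {
            double[] x = { 1.0, 2.0, 3.0, 4.0, 5.0 };
            double[] y = { 2.1, 3.9, 6.2, 8.0, 9.9 };

            // Unbiased sample covariance (N-1) versus population covariance (N).
            Console.WriteLine(x.Covariance(y));
            Console.WriteLine(x.PopulationCovariance(y));

            // Quadratic mean and the R8 median estimate of a single data set.
            Console.WriteLine(y.RootMeanSquare());
            Console.WriteLine(y.Median());
        }
    }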
[Quantile continued: QuantileCustom takes a QuantileDefinition so results can be made consistent with another product's convention; Percentile (integer selector, 0-100; use Quantile for non-integer values), LowerQuartile, UpperQuartile, InterquartileRange and the {min, lower quartile, median, upper quartile, max} FiveNumberSummary, all approximately median-unbiased (R8); OrderStatistic for the one-based order 1..N; Ranks of each entry with a selectable RankDefinition for tie handling; and a rank-based quantile estimator whose definition can likewise be chosen for compatibility with an existing system.]
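Because several quantile conventions exist, a sketch of the default R8 estimator next to an explicit definition may be useful (it assumes the Quantile, QuantileCustom, Percentile and InterquartileRange extensions described above; the QuantileDefinition.Excel member name and the data are my assumptions):

    using System;
    using MathNet.Numerics.Statistics;

    class QuantileDemo
    {
        static void Main()
        {
            double[] data = { 1.0, 3.0, 3.0, 6.0, 7.0, 8.0, 9.0 };

            // Default estimator: approximately median-unbiased (R8).
            Console.WriteLine(data.Quantile(0.25));
            Console.WriteLine(data.Median());

            // Force compatibility with another product's quantile definition (assumed enum member).
            Console.WriteLine(data.QuantileCustom(0.25, QuantileDefinition.Excel));

            // Integer percentile and inter-quartile range, both R8 by default.
            Console.WriteLine(data.Percentile(90));
            Console.WriteLine(data.InterquartileRange());
        }
    }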
[Empirical CDF and inverse CDF, evaluated at a point or returned as function handles; Entropy of a stream of doubles in bits (NaN if any value is NaN, nullable overload ignores nulls); and the sample mean over a moving window of the last k samples. The StreamingStatistics class then provides the same core estimators for IEnumerable sequences in a single pass, without keeping the data in memory: Minimum, Maximum and their absolute-value variants, Mean, GeometricMean, HarmonicMean, and Variance/StandardDeviation in unbiased N-1 sample form and N population form, with the usual NaN rules.]
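Since the streaming variants accept any IEnumerable and touch each value exactly once, they can run over sequences that would never fit in memory; a sketch under that assumption (the generated stream is arbitrary):

    using System;
    using System.Collections.Generic;
    using System.Linq;
    using MathNet.Numerics.Statistics;

    class StreamingDemo
    {
        static void Main()
        {
            // A lazy stream of one million values; nothing is buffered by the estimator.
            IEnumerable<double> stream = Enumerable.Range(1, 1000000).Select(i => Math.Sin(i));

            // Mean and unbiased variance in a single pass.
            Tuple<double, double> mv = StreamingStatistics.MeanVariance(stream);
            Console.WriteLine("mean = " + mv.Item1 + ", variance = " + mv.Item2);

            // Single-pass extremum on another stream.
            Console.WriteLine(StreamingStatistics.Maximum(Enumerable.Range(1, 100).Select(i => (double)i)));
        }
    }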
[Streaming MeanVariance and MeanStandardDeviation, Covariance and PopulationCovariance over two streams, RootMeanSquare and Entropy complete the single-pass statistics. CommonParallel, a helper used to simplify parallel code between the .NET 4.0 and Silverlight builds, offers a parallel For over an index range (optionally with a partition size), Invoke for running independent actions as discrete tasks, and select/reduce overloads for picking an item such as Max or Min across subsets. The Trig toolkit follows: conversions among degree (360-periodic), grad (400-periodic) and radian (2*Pi-periodic) angles, the normalized Sinc function sin(pi*x)/(pi*x), and Sin, Cos, Tan, Cot, Sec and Csc plus the principal Asin and Acos, each for both radian doubles and Complex arguments.]
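A small sketch of the angle conversions and the real/complex overloads (values are arbitrary; System.Numerics.Complex is available on the net40 target):

    using System;
    using System.Numerics;
    using MathNet.Numerics;

    class TrigDemo
    {
        static void Main()
        {
            // 360-periodic degrees to 2*Pi-periodic radians, then the ordinary sine.
            double rad = Trig.DegreeToRadian(30.0);
            Console.WriteLine(Trig.Sin(rad));               // approximately 0.5

            // The same functions are overloaded for System.Numerics.Complex.
            Console.WriteLine(Trig.Cos(new Complex(0.0, 1.0)));

            // Normalized sinc: sin(pi*x)/(pi*x).
            Console.WriteLine(Trig.Sinc(0.5));
        }
    }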
[The remaining inverse trigonometric functions (Atan, Acot, Asec, Acsc) and the hyperbolic (Sinh through Csch) and inverse-hyperbolic area functions (Asinh through Acsch), again for double and Complex arguments; the Window class with Hamming, Hann, Cosine and Lanczos windows in symmetric (filter design) and periodic (FFT) variants plus Gauss, Blackman, Blackman-Harris, Blackman-Nuttall, Bartlett, Bartlett-Hann, Nuttall, flat top, uniform rectangular (Dirichlet) and triangular windows; and the strongly typed Resources class exposing the ResourceManager, the override culture and the library's localized exception and validation message strings.]
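The symmetric/periodic distinction matters in practice: symmetric windows suit FIR filter design while periodic ones suit FFT block processing. A sketch assuming the Window.Hamming and Window.HannPeriodic factories listed above:

    using System;
    using MathNet.Numerics;

    class WindowDemo
    {
        static void Main()
        {
            // Symmetric Hamming window (filter design) and periodic Hann window (FFT use).
            double[] hamming = Window.Hamming(64);
            double[] hannPeriodic = Window.HannPeriodic(64);

            // Symmetric windows end on the same value they start on; periodic ones do not.
            Console.WriteLine(hamming[0] + " .. " + hamming[63]);
            Console.WriteLine(hannPeriodic[0] + " .. " + hannPeriodic[63]);
        }
    }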
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/net40/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/net40/MathNet.Numerics.dll
new file mode 100644
index 0000000..1d2d595
Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/net40/MathNet.Numerics.dll differ
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/net40/MathNet.Numerics.xml b/src/packages/MathNet.Numerics.3.16.0/lib/net40/MathNet.Numerics.xml
new file mode 100644
index 0000000..442c994
--- /dev/null
+++ b/src/packages/MathNet.Numerics.3.16.0/lib/net40/MathNet.Numerics.xml
@@ -0,0 +1,52492 @@
[MathNet.Numerics.xml: 52,492 lines of IntelliSense documentation for the MathNet.Numerics assembly. It opens with the numerical-derivative facade: point evaluation and function-handle factories for first, second and arbitrary-order derivatives of scalar univariate functions, and for partial derivatives of multivariate and bivariate functions, initialized from a NumericalDerivative with given or default points and center; this is followed by the FiniteDifferenceCoefficients class, which builds Taylor-series finite difference coefficients for n points up to order n-1, with the chosen center position distinguishing forward, backward and central schemes.]
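A sketch of the facade usage described above (the MathNet.Numerics.Differentiate class name and its FirstDerivativeFunc handle factory are my recollection of the 3.x API and should be treated as assumptions; the sample function is arbitrary):

    using System;
    using MathNet.Numerics;

    class DifferentiateDemo
    {
        static void Main()
        {
            Func<double, double> f = x => Math.Sin(x);

            // Point evaluation of the first and second derivative of sin at x = 0.
            Console.WriteLine(Differentiate.FirstDerivative(f, 0.0));    // approximately 1
            Console.WriteLine(Differentiate.SecondDerivative(f, 0.0));   // approximately 0

            // A reusable function handle for f'(x).
            Func<double, double> df = Differentiate.FirstDerivativeFunc(f);
            Console.WriteLine(df(Math.PI / 3.0));                        // approximately 0.5
        }
    }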
+ + + For n points, coefficients are calculated up to the maximum derivative order possible (n-1). + The current function value position specifies the "center" for surrounding coefficients. + Selecting the first, middle or last positions represent forward, backwards and central difference methods. + + + + + + + Number of points for finite difference coefficients. Changing this value recalculates the coefficients table. + + + + + Initializes a new instance of the class. + + Number of finite difference coefficients. + + + + Gets the finite difference coefficients for a specified center and order. + + Current function position with respect to coefficients. Must be within point range. + Order of finite difference coefficients. + Vector of finite difference coefficients. + + + + Gets the finite difference coefficients for all orders at a specified center. + + Current function position with respect to coefficients. Must be within point range. + Rectangular array of coefficients, with columns specifing order. + + + + Type of finite different step size. + + + + + The absolute step size value will be used in numerical derivatives, regardless of order or function parameters. + + + + + A base step size value, h, will be scaled according to the function input parameter. A common example is hx = h*(1+abs(x)), however + this may vary depending on implementation. This definition only guarantees that the only scaling will be relative to the + function input parameter and not the order of the finite difference derivative. + + + + + A base step size value, eps (typically machine precision), is scaled according to the finite difference coefficient order + and function input parameter. The initial scaling according to finite different coefficient order can be thought of as producing a + base step size, h, that is equivalent to scaling. This stepsize is then scaled according to the function + input parameter. Although implementation may vary, an example of second order accurate scaling may be (eps)^(1/3)*(1+abs(x)). + + + + + Class to evaluate the numerical derivative of a function using finite difference approximations. + Variable point and center methods can be initialized . + This class can also be used to return function handles (delegates) for a fixed derivative order and variable. + It is possible to evaluate the derivative and partial derivative of univariate and multivariate functions respectively. + + + + + Initializes a NumericalDerivative class with the default 3 point center difference method. + + + + + Initialized a NumericalDerivative class. + + Number of points for finite difference derivatives. + Location of the center with respect to other points. Value ranges from zero to points-1. + + + + Sets and gets the finite difference step size. This value is for each function evaluation if relative stepsize types are used. + If the base step size used in scaling is desired, see . + + + Setting then getting the StepSize may return a different value. This is not unusual since a user-defined step size is converted to a + base-2 representable number to improve finite difference accuracy. + + + + + Sets and gets the base fininte difference step size. This assigned value to this parameter is only used if is set to RelativeX. + However, if the StepType is Relative, it will contain the base step size computed from based on the finite difference order. + + + + + Sets and gets the base finite difference step size. This parameter is only used if is set to Relative. 
+ By default this is set to machine epsilon, from which is computed. + + + + + Sets and gets the location of the center point for the finite difference derivative. + + + + + Number of times a function is evaluated for numerical derivatives. + + + + + Type of step size for computing finite differences. If set to absolute, dx = h. + If set to relative, dx = (1+abs(x))*h^(2/(order+1)). This provides accurate results when + h is approximately equal to the square-root of machine accuracy, epsilon. + + + + + Evaluates the derivative of equidistant points using the finite difference method. + + Vector of points StepSize apart. + Derivative order. + Finite difference step size. + Derivative of points of the specified order. + + + + Evaluates the derivative of a scalar univariate function. + + + Supplying the optional argument currentValue will reduce the number of function evaluations + required to calculate the finite difference derivative. + + Function handle. + Point at which to compute the derivative. + Derivative order. + Current function value at center. + Function derivative at x of the specified order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Input function handle. + Derivative order. + Function handle that evaluates the derivative of input function at a fixed order. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Function partial derivative at x of the specified order. + + + + Evaluates the partial derivatives of a multivariate function array. + + + This function assumes the input vector x is of the correct length for f. + + Multivariate vector function array handle. + Vector at which to evaluate the derivatives. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Vector of functions partial derivatives at x of the specified order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at a fixed order. + + + + Creates a function handle for the partial derivative of a vector multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at fixed order. + + + + Evaluates the mixed partial derivative of variable order for multivariate functions. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function handle. + Points at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivative at x of the specified order. + + + + Evaluates the mixed partial derivative of variable order for multivariate function arrays. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. 
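The (points, center) constructor and the partial-derivative members summarized above combine roughly as in the sketch below; EvaluatePartialDerivative and the constructor signature are assumed names and shapes for the members described here, not confirmed by this extract.

    using System;
    using MathNet.Numerics.Differentiation;

    static class PartialDerivativeSketch
    {
        static void Main()
        {
            // g(x, y) = x^2*y + sin(y); dg/dx = 2xy, dg/dy = x^2 + cos(y).
            Func<double[], double> g = v => v[0] * v[0] * v[1] + Math.Sin(v[1]);

            // 5 points with the center at index 2: a 5-point central scheme.
            var nd = new NumericalDerivative(5, 2);

            double[] at = { 1.5, 0.5 };
            double dgdx = nd.EvaluatePartialDerivative(g, at, 0, 1);   // d/dx, first order
            double dgdy = nd.EvaluatePartialDerivative(g, at, 1, 1);   // d/dy, first order

            Console.WriteLine("dg/dx ~ {0} (exact {1})", dgdx, 2.0 * at[0] * at[1]);
            Console.WriteLine("dg/dy ~ {0} (exact {1})", dgdy, at[0] * at[0] + Math.Cos(at[1]));
        }
    }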
+ + Multivariate function array handle. + Vector at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivatives at x of the specified order. + + + + Creates a function handle for the mixed partial derivative of a multivariate function. + + Input function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Creates a function handle for the mixed partial derivative of a multivariate vector function. + + Input vector function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Resets the evaluation counter. + + + + + Class for evaluating the Hessian of a smooth continuously differentiable function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Hessian object with a three point central difference method. + + + + + Creates a numerical Hessian with a specified differentiation scheme. + + Number of points for Hessian evaluation. + Center point for differentiation. + + + + Evaluates the Hessian of the scalar univariate function f at points x. + + Scalar univariate function handle. + Point at which to evaluate Hessian. + Hessian tensor. + + + + Evaluates the Hessian of a multivariate function f at points x. + + + This method of computing the Hessian is only vaid for Lipschitz continuous functions. + The function mirrors the Hessian along the diagonal since d2f/dxdy = d2f/dydx for continuously differentiable functions. + + Multivariate function handle.> + Points at which to evaluate Hessian.> + Hessian tensor. + + + + Resets the function evaluation counter for the Hessian. + + + + + Class for evaluating the Jacobian of a function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Jacobian object with a three point central difference method. + + + + + Creates a numerical Jacobian with a specified differentiation scheme. + + Number of points for Jacobian evaluation. + Center point for differentiation. + + + + Evaluates the Jacobian of scalar univariate function f at point x. + + Scalar univariate function handle. + Point at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x. + + + This function assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x given a current function value. + + + To minimize the number of function evaluations, a user can supply the current value of the function + to be used in computing the Jacobian. This value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. 
+ Points at which to evaluate Jacobian. + Current function value at finite difference center. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function array f at vector x. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Jacobian matrix. + + + + Evaluates the Jacobian of a multivariate function array f at vector x given a vector of current function values. + + + To minimize the number of function evaluations, a user can supply a vector of current values of the functions + to be used in computing the Jacobian. These value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Vector of current function values. + Jacobian matrix. + + + + Resets the function evaluation counter for the Jacobian. + + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + + + + Create a Beta PERT distribution, used in risk analysis and other domains where an expert forecast + is used to construct an underlying beta distribution. + + The minimum value. + The maximum value. + The most likely value (mode). + The random number generator which is used to draw random samples. + The Beta distribution derived from the PERT parameters. + + + + A string representation of the distribution. + + A string representation of the BetaScaled distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the α shape parameter of the BetaScaled distribution. Range: α > 0. + + + + + Gets the β shape parameter of the BetaScaled distribution. Range: β > 0. + + + + + Gets the location (μ) of the BetaScaled distribution. + + + + + Gets the scale (σ) of the BetaScaled distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the BetaScaled distribution. + + + + + Gets the variance of the BetaScaled distribution. + + + + + Gets the standard deviation of the BetaScaled distribution. + + + + + Gets the entropy of the BetaScaled distribution. + + + + + Gets the skewness of the BetaScaled distribution. + + + + + Gets the mode of the BetaScaled distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the BetaScaled distribution. + + + + + Gets the minimum of the BetaScaled distribution. + + + + + Gets the maximum of the BetaScaled distribution. 
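Before the distribution classes that follow, a short sketch of the NumericalHessian and NumericalJacobian evaluators described above (both default to a 3-point central scheme). Evaluate is the assumed method name behind the "Evaluates the Hessian/Jacobian" summaries, and the return shapes (a square matrix and a gradient vector) are assumptions consistent with those summaries.

    using System;
    using MathNet.Numerics.Differentiation;

    static class HessianJacobianSketch
    {
        static void Main()
        {
            // h(x, y) = x^2 + x*y + y^2
            Func<double[], double> h = v => v[0] * v[0] + v[0] * v[1] + v[1] * v[1];

            var hessian = new NumericalHessian();                     // 3-point central difference
            double[,] H = hessian.Evaluate(h, new[] { 1.0, 2.0 });    // expected [[2, 1], [1, 2]]

            var jacobian = new NumericalJacobian();
            double[] grad = jacobian.Evaluate(h, new[] { 1.0, 2.0 }); // expected [2x + y, x + 2y] = [4, 5]

            Console.WriteLine("H = [[{0}, {1}], [{2}, {3}]]", H[0, 0], H[0, 1], H[1, 0], H[1, 1]);
            Console.WriteLine("grad = [{0}, {1}]", grad[0], grad[1]);
        }
    }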
+ + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. 
+ The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Triangular distribution. + For details, see Wikipedia - Triangular distribution. + + The distribution will use the by default. + Users can get/set the random number generator by using the property. + The statistics classes will check whether all the incoming parameters are in the allowed range. This might involve heavy computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The random number generator which is used to draw random samples. + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. 
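A sketch of the BetaScaled distribution documented above, including the expert-forecast (PERT) construction. The constructor order (α, β, location, scale) follows the parameter summaries above; BetaScaled.PERT, CumulativeDistribution and Sample are assumed member names.

    using System;
    using MathNet.Numerics.Distributions;

    static class BetaScaledSketch
    {
        static void Main()
        {
            // BetaScaled(alpha, beta, location, scale)
            var bs = new BetaScaled(2.0, 5.0, 10.0, 4.0);
            Console.WriteLine("mean = {0}, stddev = {1}", bs.Mean, bs.StdDev);
            Console.WriteLine("P(X <= 11) = {0}", bs.CumulativeDistribution(11.0));

            // Expert-forecast construction: minimum, maximum, most likely value (mode).
            var pert = BetaScaled.PERT(10.0, 20.0, 12.0);
            Console.WriteLine("PERT sample: {0}", pert.Sample());
        }
    }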
+ + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). 
Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Discrete Univariate Bernoulli distribution. + The Bernoulli distribution is a distribution over bits. The parameter + p specifies the probability that a 1 is generated. + Wikipedia - Bernoulli distribution. + + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + If the Bernoulli parameter is not in the range [0,1]. + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + If the Bernoulli parameter is not in the range [0,1]. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). 
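The Triangular distribution members listed above (bounds, mode, density, cumulative distribution, inverse CDF and sampling) are typically used as in this sketch; Density, CumulativeDistribution, InverseCumulativeDistribution and Sample are assumed names for the members summarized above.

    using System;
    using MathNet.Numerics.Distributions;

    static class TriangularSketch
    {
        static void Main()
        {
            // Triangular(lower, upper, mode), with lower <= mode <= upper.
            var tri = new Triangular(0.0, 10.0, 3.0);

            Console.WriteLine("mean = {0}", tri.Mean);                             // (0 + 10 + 3) / 3
            Console.WriteLine("pdf(mode) = {0}", tri.Density(3.0));                // 2 / (upper - lower)
            Console.WriteLine("P(X <= 5) = {0}", tri.CumulativeDistribution(5.0));
            Console.WriteLine("median = {0}", tri.InverseCumulativeDistribution(0.5));
            Console.WriteLine("sample = {0}", tri.Sample());
        }
    }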
+ + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Generates one sample from the Bernoulli distribution. + + The random source to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A random sample from the Bernoulli distribution. + + + + Samples a Bernoulli distributed random variable. + + A sample from the Bernoulli distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Continuous Univariate Beta distribution. + For details about this distribution, see + Wikipedia - Beta distribution. + + + There are a few special cases for the parameterization of the Beta distribution. When both + shape parameters are positive infinity, the Beta distribution degenerates to a point distribution + at 0.5. When one of the shape parameters is positive infinity, the distribution degenerates to a point + distribution at the positive infinity. When both shape parameters are 0.0, the Beta distribution + degenerates to a Bernoulli distribution with parameter 0.5. When one shape parameter is 0.0, the + distribution degenerates to a point distribution at the non-zero shape parameter. + + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. 
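A sketch of the Bernoulli distribution described above. Probability and CumulativeDistribution are assumed names for the PMF and CDF members, and Samples() is assumed to return a lazy sequence of integer draws.

    using System;
    using System.Linq;
    using MathNet.Numerics.Distributions;

    static class BernoulliSketch
    {
        static void Main()
        {
            var coin = new Bernoulli(0.3);                                   // P(X = 1) = 0.3

            Console.WriteLine("P(X = 1) = {0}", coin.Probability(1));        // 0.3
            Console.WriteLine("P(X = 0) = {0}", coin.Probability(0));        // 0.7
            Console.WriteLine("P(X <= 0) = {0}", coin.CumulativeDistribution(0.0));
            Console.WriteLine("mean = {0}, variance = {1}", coin.Mean, coin.Variance);

            // A few draws (0s and 1s).
            Console.WriteLine(string.Join(",", coin.Samples().Take(5)));
        }
    }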
+ The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + A string representation of the Beta distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Gets the α shape parameter of the Beta distribution. Range: α ≥ 0. + + + + + Gets the β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Beta distribution. + + + + + Gets the variance of the Beta distribution. + + + + + Gets the standard deviation of the Beta distribution. + + + + + Gets the entropy of the Beta distribution. + + + + + Gets the skewness of the Beta distribution. + + + + + Gets the mode of the Beta distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the Beta distribution. + + + + + Gets the minimum of the Beta distribution. + + + + + Gets the maximum of the Beta distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Beta distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Beta distribution. + + a sequence of samples from the distribution. + + + + Samples Beta distributed random variables by sampling two Gamma variables and normalizing. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a random number from the Beta distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The α shape parameter of the Beta distribution. Range: α ≥ 0. 
+ The β shape parameter of the Beta distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Binomial distribution. + For details about this distribution, see + Wikipedia - Binomial distribution. + + + The distribution is parameterized by a probability (between 0.0 and 1.0). + + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + If is not in the interval [0.0,1.0]. + If is negative. + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The random number generator which is used to draw random samples. + If is not in the interval [0.0,1.0]. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + + + + Gets the success probability in each trial. Range: 0 ≤ p ≤ 1. + + + + + Gets the number of trials. Range: n ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. 
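A sketch of the Beta distribution described above. Density, CumulativeDistribution, InverseCumulativeDistribution (flagged above as slow and unreliable) and Samples are assumed member names.

    using System;
    using System.Linq;
    using MathNet.Numerics.Distributions;

    static class BetaSketch
    {
        static void Main()
        {
            var beta = new Beta(2.0, 5.0);

            Console.WriteLine("mean = {0}, mode = {1}", beta.Mean, beta.Mode);   // 2/7 and 0.2
            Console.WriteLine("pdf(0.25) = {0}", beta.Density(0.25));
            Console.WriteLine("P(X <= 0.5) = {0}", beta.CumulativeDistribution(0.5));
            Console.WriteLine("median ~ {0}", beta.InverseCumulativeDistribution(0.5));

            // The average of many draws should approach the mean a / (a + b) = 2/7.
            Console.WriteLine("sample mean ~ {0}", beta.Samples().Take(5000).Average());
        }
    }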
+ + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the Binomial distribution without doing parameter checking. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successful trials. + + + + Samples a Binomially distributed random variable. + + The number of successes in N trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Binomially distributed random variables. + + a sequence of successes in N trials. + + + + Samples a binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Samples a binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. 
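A sketch of the Binomial distribution described above; as in the parameter summaries, the constructor takes the per-trial success probability first and the trial count second. Probability and CumulativeDistribution are assumed member names.

    using System;
    using MathNet.Numerics.Distributions;

    static class BinomialSketch
    {
        static void Main()
        {
            // Binomial(p, n): success probability p per trial, n trials.
            var bin = new Binomial(0.2, 10);

            Console.WriteLine("mean = {0}, variance = {1}", bin.Mean, bin.Variance);   // np = 2, np(1-p) = 1.6
            Console.WriteLine("P(X = 3)  = {0}", bin.Probability(3));
            Console.WriteLine("P(X <= 3) = {0}", bin.CumulativeDistribution(3.0));
            Console.WriteLine("one draw: {0} successes", bin.Sample());
        }
    }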
+ The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Discrete Univariate Categorical distribution. + For details about this distribution, see + Wikipedia - Categorical distribution. This + distribution is sometimes called the Discrete distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + Support: 0..k where k = length(probability mass array)-1 + + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class from a . The distribution + will not be automatically updated when the histogram changes. The categorical distribution will have + one value for each bucket and a probability for that value proportional to the bucket count. + + The histogram from which to create the categorical variable. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Gets the probability mass vector (non-negative ratios) of the multinomial. + + Sometimes the normalized probability vector cannot be represented exactly in a floating point representation. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a . + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets he mode of the distribution. + + Throws a . 
+ + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. 
+ + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. 
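A sketch of the Categorical distribution described above, constructed from unnormalized ratios as the remarks allow; Probability, CumulativeDistribution and Sample are assumed member names.

    using System;
    using MathNet.Numerics.Distributions;

    static class CategoricalSketch
    {
        static void Main()
        {
            // Unnormalized ratios are allowed; they act as relative weights for categories 0..2.
            double[] ratios = { 1.0, 2.0, 7.0 };
            var cat = new Categorical(ratios);

            Console.WriteLine("P(X = 2) = {0}", cat.Probability(2));                 // 0.7
            Console.WriteLine("P(X <= 1) = {0}", cat.CumulativeDistribution(1.0));   // 0.3

            // Empirical counts over a few draws.
            var counts = new int[3];
            for (int i = 0; i < 1000; i++) counts[cat.Sample()]++;
            Console.WriteLine("counts: {0}/{1}/{2}", counts[0], counts[1], counts[2]);
        }
    }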
+ + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. 
This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. 
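A sketch of the Cauchy distribution summarized above (location x0 and scale γ). Because the Cauchy distribution has no finite mean or variance, only densities, quantiles and samples are shown; member names are assumptions.

    using System;
    using MathNet.Numerics.Distributions;

    static class CauchySketch
    {
        static void Main()
        {
            var cauchy = new Cauchy(0.0, 1.0);                                         // location 0, scale 1

            Console.WriteLine("pdf(0) = {0}", cauchy.Density(0.0));                    // 1/pi
            Console.WriteLine("P(X <= 1) = {0}", cauchy.CumulativeDistribution(1.0));  // 0.75
            Console.WriteLine("median = {0}", cauchy.InverseCumulativeDistribution(0.5));
            Console.WriteLine("sample = {0}", cauchy.Sample());
        }
    }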
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. 
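Editor's note: since the Chi-Squared entries above include an inverse CDF (quantile function) alongside the PDF/CDF and samplers, a typical use is looking up critical values. A hedged sketch, assuming the 3.x class name ChiSquared and its static InvCDF(freedom, p):

using System;
using MathNet.Numerics.Distributions;

static class ChiSquaredExample
{
    public static void Run()
    {
        // 95th percentile of a chi-squared distribution with 5 degrees of freedom.
        double critical = ChiSquared.InvCDF(5.0, 0.95);

        // The instance API mirrors the statics.
        var chi2 = new ChiSquared(5.0);
        double p = chi2.CumulativeDistribution(critical);   // ~0.95 by construction

        Console.WriteLine($"chi2(0.95; 5) = {critical}, CDF check = {p}");
    }
}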
+ + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. + + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. 
Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. 
+ The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. 
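Editor's note: the Conway-Maxwell-Poisson entries above point out that λ and ν interpolate between the Geometric (ν = 0), Poisson (ν = 1) and Bernoulli (ν → ∞) cases, so a quick sanity check on a parameter choice is to print a few probability masses. A sketch under the assumption that the discrete-distribution members are named Probability and Sample, as elsewhere in MathNet.Numerics 3.x:

using System;
using MathNet.Numerics.Distributions;

static class CmpExample
{
    public static void Run()
    {
        // lambda = 2, nu = 1 should behave like a Poisson(2).
        var cmp = new ConwayMaxwellPoisson(2.0, 1.0);
        for (int k = 0; k <= 4; k++)
        {
            Console.WriteLine($"P(X = {k}) = {cmp.Probability(k)}");
        }

        int draw = cmp.Sample();   // one sample; the normalization constant is cached internally
        Console.WriteLine($"sample = {draw}");
    }
}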
+ + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. 
+ The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. 
+ Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. 
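Editor's note: the discrete uniform entries above (inclusive lower and upper bounds) map directly onto dice-style sampling. A brief sketch, assuming the 3.x names Probability, CumulativeDistribution and the static DiscreteUniform.Sample:

using System;
using MathNet.Numerics.Distributions;

static class DiscreteUniformExample
{
    public static void Run()
    {
        // A fair six-sided die: integers 1..6, each with probability 1/6.
        var die = new DiscreteUniform(1, 6);
        Console.WriteLine(die.Probability(3));            // 0.1666...
        Console.WriteLine(die.CumulativeDistribution(3)); // P(X <= 3) = 0.5

        // Static form with an explicit RNG, as documented above.
        int roll = DiscreteUniform.Sample(new Random(7), 1, 6);
        Console.WriteLine(roll);
    }
}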
+ + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. 
+ a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. + The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . 
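Editor's note: the Erlang entries above parameterize the distribution by an integer shape k and either a rate λ or a scale μ (via the shape-and-scale constructor). A minimal sketch, assuming the 3.x signature Erlang(int shape, double rate); the scenario is the editor's illustration:

using System;
using MathNet.Numerics.Distributions;

static class ErlangExample
{
    public static void Run()
    {
        // Waiting time for k = 3 arrivals at rate lambda = 2 per unit time.
        var erlang = new Erlang(3, 2.0);
        Console.WriteLine(erlang.Mean);                        // k / lambda = 1.5
        Console.WriteLine(erlang.Density(1.0));                // PDF at x = 1
        Console.WriteLine(erlang.CumulativeDistribution(1.0)); // P(X <= 1)

        double wait = erlang.Sample();                         // one simulated total waiting time
        Console.WriteLine(wait);
    }
}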
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. 
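Editor's note: the Exponential entries above include an inverse CDF, which corresponds to the usual inverse-transform sampler x = -ln(1 - u) / λ. A sketch, assuming the 3.x statics Exponential.InvCDF, Exponential.CDF and Exponential.Sample with the rate passed first:

using System;
using MathNet.Numerics.Distributions;

static class ExponentialExample
{
    public static void Run()
    {
        const double rate = 0.5;                        // lambda

        // Quantile and CDF round-trip.
        double q90 = Exponential.InvCDF(rate, 0.9);
        Console.WriteLine(Exponential.CDF(rate, q90));  // ~0.9

        // Inverse-transform sampling by hand agrees in distribution with the library sampler.
        var rng = new Random(3);
        double manual = -Math.Log(1.0 - rng.NextDouble()) / rate;
        double library = Exponential.Sample(rng, rate);
        Console.WriteLine($"{manual} {library}");
    }
}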
+ + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. 
+ a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. 
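Editor's note: the F-distribution entries above flag the inverse CDF as "currently not an explicit implementation, hence slow and unreliable", so it is worth computing critical values once and reusing them rather than calling InvCDF in a loop. A sketch, assuming the 3.x statics FisherSnedecor.InvCDF and FisherSnedecor.CDF; the observed statistic is a made-up illustration:

using System;
using MathNet.Numerics.Distributions;

static class FisherSnedecorExample
{
    public static void Run()
    {
        const double d1 = 3.0, d2 = 20.0;

        // 95% critical value for an F(3, 20) test; compute once, reuse across tests.
        double fCrit = FisherSnedecor.InvCDF(d1, d2, 0.95);

        double observed = 4.2;                                   // hypothetical F statistic
        double pValue = 1.0 - FisherSnedecor.CDF(d1, d2, observed);

        Console.WriteLine($"F_crit = {fCrit}, p = {pValue}, reject = {observed > fCrit}");
    }
}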
+ + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. + + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. 
Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
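Editor's note: the Gamma entries above note that sampling follows Marsaglia & Tsang (2000) and that the distribution is parameterized by shape α and rate β, with shape-and-scale constructors also provided. A hedged sketch, assuming the 3.x constructor Gamma(shape, rate) and the static InvCDF and Samples:

using System;
using System.Linq;
using MathNet.Numerics.Distributions;

static class GammaExample
{
    public static void Run()
    {
        // shape alpha = 2, rate beta = 3  =>  mean = alpha / beta.
        var gamma = new Gamma(2.0, 3.0);
        Console.WriteLine(gamma.Mean);                   // 0.666...

        // Median via the static inverse CDF.
        Console.WriteLine(Gamma.InvCDF(2.0, 3.0, 0.5));

        // The empirical mean of a few thousand samples should sit near the analytic mean.
        var rng = new Random(11);
        double avg = Gamma.Samples(rng, 2.0, 3.0).Take(5000).Average();
        Console.WriteLine(avg);
    }
}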
+ + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). 
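Editor's note: the Geometric entries above stress that this implementation is supported on the positive integers (it "will never generate 0's"), so P(X = k) = p(1 - p)^(k-1) for k ≥ 1 and the mean is 1/p. A sketch, assuming the 3.x member names Probability and Sample:

using System;
using MathNet.Numerics.Distributions;

static class GeometricExample
{
    public static void Run()
    {
        var geo = new Geometric(0.25);                 // p = probability of success per trial
        Console.WriteLine(geo.Probability(1));         // p = 0.25
        Console.WriteLine(geo.Probability(4));         // p(1-p)^3 ~ 0.1055
        Console.WriteLine(geo.Mean);                   // 1 / p = 4

        int trialsUntilFirstSuccess = geo.Sample();    // always >= 1 for this implementation
        Console.WriteLine(trialsUntilFirstSuccess);
    }
}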
+ + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. 
+ + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. + + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. 
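Editor's note: the Hypergeometric entries above use N for the population size, K (or M) for the number of successes within the population, and n for the number of draws without replacement. A small sketch, assuming the 3.x constructor Hypergeometric(population, success, draws); the urn scenario is the editor's illustration:

using System;
using MathNet.Numerics.Distributions;

static class HypergeometricExample
{
    public static void Run()
    {
        // Population N = 50 items, K = 5 defective, draw n = 10 without replacement.
        var hyper = new Hypergeometric(50, 5, 10);

        Console.WriteLine(hyper.Probability(0));                   // no defectives in the draw
        Console.WriteLine(1.0 - hyper.CumulativeDistribution(0));  // at least one defective
        Console.WriteLine(hyper.Mean);                             // n * K / N = 1.0

        Console.WriteLine(hyper.Sample());                         // defectives found in one simulated draw
    }
}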
+ + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. 
+ The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. + Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
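The Laplace summary above quotes the closed-form density p(x) = exp(-|x - μ| / b) / (2b). The short sketch below evaluates that formula directly and compares it with the Density member documented above; the class and member names are assumed to follow the documentation (Laplace(location, scale) in MathNet.Numerics.Distributions), so adjust the using directive if these types live elsewhere.

    // Hedged sketch: checks the quoted Laplace PDF formula against the documented Density member.
    using System;
    using MathNet.Numerics.Distributions;

    class LaplaceDemo
    {
        static void Main()
        {
            double location = 0.0, scale = 1.5, x = 2.0;
            var laplace = new Laplace(location, scale);

            // p(x) = exp(-|x - μ| / b) / (2b)
            double byFormula = Math.Exp(-Math.Abs(x - location) / scale) / (2.0 * scale);
            double byLibrary = laplace.Density(x);

            Console.WriteLine($"{byFormula} vs {byLibrary}"); // should agree up to rounding error
        }
    }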
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. 
+ The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. + + + + + Gets the maximum of the log-normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the density at . + + MATLAB: lognpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the log density at . 
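The log-normal members above call out MATLAB analogues (lognpdf, logncdf, logninv). A small sketch of the corresponding calls through the instance members is shown below; the member names (Density, CumulativeDistribution, InverseCumulativeDistribution) are inferred from the descriptions above and should be treated as assumptions.

    // Hedged sketch: MATLAB lognpdf/logncdf/logninv analogues via the instance members
    // described above. Assumes LogNormal(mu, sigma) in MathNet.Numerics.Distributions.
    using System;
    using MathNet.Numerics.Distributions;

    class LogNormalDemo
    {
        static void Main()
        {
            // μ and σ parameterize ln(X), not X itself (log-scale and shape, per the docs above).
            var logNormal = new LogNormal(0.5, 0.25);

            Console.WriteLine(logNormal.Density(1.2));                       // ~ lognpdf(1.2, 0.5, 0.25)
            Console.WriteLine(logNormal.CumulativeDistribution(1.2));        // ~ logncdf(1.2, 0.5, 0.25)
            Console.WriteLine(logNormal.InverseCumulativeDistribution(0.9)); // ~ logninv(0.9, 0.5, 0.25)
        }
    }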
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: logncdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: logninv + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Multivariate Matrix-valued Normal distributions. The distribution + is parameterized by a mean matrix (M), a covariance matrix for the rows (V) and a covariance matrix + for the columns (K). If the dimension of M is d-by-m then V is d-by-d and K is m-by-m. + Wikipedia - MatrixNormal distribution. + + + + + The mean of the matrix normal distribution. + + + + + The covariance matrix for the rows. + + + + + The covariance matrix for the columns. + + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + The random number generator which is used to draw random samples. + If the dimensions of the mean and two covariance matrices don't match. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean of the matrix normal. + The covariance matrix for the rows. 
+ The covariance matrix for the columns. + + + + Gets the mean. (M) + + The mean of the distribution. + + + + Gets the row covariance. (V) + + The row covariance. + + + + Gets the column covariance. (K) + + The column covariance. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Evaluates the probability density function for the matrix normal distribution. + + The matrix at which to evaluate the density at. + the density at + If the argument does not have the correct dimensions. + + + + Samples a matrix normal distributed random variable. + + A random number from this distribution. + + + + Samples a matrix normal distributed random variable. + + The random number generator to use. + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + a sequence of samples from the distribution. + + + + Samples a vector normal distributed random variable. + + The random number generator to use. + The mean of the vector normal distribution. + The covariance matrix of the vector normal distribution. + a sequence of samples from defined distribution. + + + + Multivariate Multinomial distribution. For details about this distribution, see + Wikipedia - Multinomial distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + + + Stores the normalized multinomial probabilities. + + + + + The number of trials. + + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class from histogram . The distribution will + not be automatically updated when the histogram changes. + + Histogram instance + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative returns false, + if the sum of parameters is 0.0, or if the number of trials is negative; otherwise true. + + + + Gets the proportion of ratios. + + + + + Gets the number of trials. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Computes values of the probability mass function. 
+ + Non-negative integers x1, ..., xk + The probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Computes values of the log probability mass function. + + Non-negative integers x1, ..., xk + The log probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Samples one multinomial distributed random variable. + + the counts for each of the different possible values. + + + + Samples a sequence multinomially distributed random variables. + + a sequence of counts for each of the different possible values. + + + + Samples one multinomial distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + the counts for each of the different possible values. + + + + Samples a multinomially distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of variables needed. + a sequence of counts for each of the different possible values. + + + + Discrete Univariate Negative Binomial distribution. + The negative binomial is a distribution over the natural numbers with two parameters r, p. For the special + case that r is an integer one can interpret the distribution as the number of failures before the r'th success + when the probability of success is p. + Wikipedia - NegativeBinomial distribution. + + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Gets the number of successes. Range: r ≥ 0. + + + + + Gets the probability of success. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). 
+ + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Samples a negative binomial distributed random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + a sample from the distribution. + + + + Samples a NegativeBinomial distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of NegativeBinomial distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Continuous Univariate Normal distribution, also known as Gaussian distribution. 
+ For details about this distribution, see + Wikipedia - Normal distribution. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a normal distribution from a mean and standard deviation. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + a normal distribution. + + + + Constructs a normal distribution from a mean and variance. + + The mean (μ) of the normal distribution. + The variance (σ^2) of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Constructs a normal distribution from a mean and precision. + + The mean (μ) of the normal distribution. + The precision of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Estimates the normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + MATLAB: normfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Gets the mean (μ) of the normal distribution. + + + + + Gets the standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + + Gets the variance of the normal distribution. + + + + + Gets the precision of the normal distribution. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the entropy of the normal distribution. + + + + + Gets the skewness of the normal distribution. + + + + + Gets the mode of the normal distribution. + + + + + Gets the median of the normal distribution. + + + + + Gets the minimum of the normal distribution. + + + + + Gets the maximum of the normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . 
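Since the normal-distribution members above mirror MATLAB's normcdf/norminv, a brief sketch of a typical use (a 95% central interval and the probability mass it covers) is included below. It assumes a Normal(mean, stddev) class in MathNet.Numerics.Distributions with the instance members described above; the identifiers are assumptions, not a definitive API reference.

    // Hedged sketch: 95% central interval for N(μ, σ) via the documented CDF/InvCDF members.
    using System;
    using MathNet.Numerics.Distributions;

    class NormalDemo
    {
        static void Main()
        {
            var n = new Normal(10.0, 2.0);   // mean μ = 10, standard deviation σ = 2

            double lower = n.InverseCumulativeDistribution(0.025);
            double upper = n.InverseCumulativeDistribution(0.975);
            Console.WriteLine($"95% central interval: [{lower:F3}, {upper:F3}]");

            // The probability mass between the two bounds should come back as ~0.95.
            Console.WriteLine(n.CumulativeDistribution(upper) - n.CumulativeDistribution(lower));
        }
    }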
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the density at . + + MATLAB: normpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: normcdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: norminv + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. 
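The sampling members above repeatedly reference the Box-Muller algorithm. For orientation, here is a minimal, self-contained sketch of the basic Box-Muller transform (not the library's exact implementation): two uniform draws on (0, 1] are mapped to one standard-normal draw, which is then rescaled by μ and σ.

    // Hedged sketch of the Box-Muller transform referenced above; illustrative only.
    using System;

    static class BoxMuller
    {
        public static double NextGaussian(Random rng, double mean, double stddev)
        {
            double u1 = 1.0 - rng.NextDouble();   // shift to (0, 1] so Log(u1) is finite
            double u2 = rng.NextDouble();
            double standardNormal = Math.Sqrt(-2.0 * Math.Log(u1)) * Math.Cos(2.0 * Math.PI * u2);
            return mean + stddev * standardNormal; // rescale N(0,1) to N(mean, stddev^2)
        }
    }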
+ + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + This structure represents the type over which the distribution + is defined. + + + + + The mean value. + + + + + The precision value. + + + + + Initializes a new instance of the struct. + + The mean of the pair. + The precision of the pair. + + + + Gets or sets the mean of the pair. + + + + + Gets or sets the precision of the pair. + + + + + Multivariate Normal-Gamma Distribution. + The distribution is the conjugate prior distribution for the + distribution. It specifies a prior over the mean and precision of the distribution. + It is parameterized by four numbers: the mean location, the mean scale, the precision shape and the + precision inverse scale. + The distribution NG(mu, tau | mloc,mscale,psscale,pinvscale) = Normal(mu | mloc, 1/(mscale*tau)) * Gamma(tau | psscale,pinvscale). + The following degenerate cases are special: when the precision is known, + the precision shape will encode the value of the precision while the precision inverse scale is positive + infinity. When the mean is known, the mean location will encode the value of the mean while the scale + will be positive infinity. A completely degenerate NormalGamma distribution with known mean and precision is possible as well. + Wikipedia - Normal-Gamma distribution. + + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Gets the location of the mean. + + + + + Gets the scale of the mean. + + + + + Gets the shape of the precision. + + + + + Gets the inverse scale of the precision. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Returns the marginal distribution for the mean of the NormalGamma distribution. + + the marginal distribution for the mean of the NormalGamma distribution. + + + + Returns the marginal distribution for the precision of the distribution. + + The marginal distribution for the precision of the distribution/ + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the variance of the distribution. + + The mean of the distribution. + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + Density value + + + + Evaluates the probability density function for a NormalGamma distribution. 
+ + The mean of the distribution + The precision of the distribution + Density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + The log of the density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + The log of the density value + + + + Generates a sample from the NormalGamma distribution. + + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + a sequence of samples from the distribution. + + + + Generates a sample from the NormalGamma distribution. + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sequence of samples from the distribution. + + + + Continuous Univariate Pareto distribution. + The Pareto distribution is a power law probability distribution that coincides with social, + scientific, geophysical, actuarial, and many other types of observable phenomena. + For details about this distribution, see + Wikipedia - Pareto distribution. + + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + If or are negative. + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The random number generator which is used to draw random samples. + If or are negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + + + + Gets the scale (xm) of the distribution. Range: xm > 0. + + + + + Gets the shape (α) of the distribution. Range: α > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
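The Pareto CDF has the well-known closed form F(x) = 1 - (xm / x)^α for x ≥ xm. The sketch below evaluates that expression and compares it with the CumulativeDistribution member documented above; it assumes a Pareto(scale, shape) constructor in MathNet.Numerics.Distributions with parameters in the order given in the documentation above.

    // Hedged sketch: closed-form Pareto CDF checked against the documented CDF member.
    using System;
    using MathNet.Numerics.Distributions;

    class ParetoDemo
    {
        static void Main()
        {
            double xm = 1.0, alpha = 3.0, x = 2.5;
            var pareto = new Pareto(xm, alpha);   // scale (xm) first, then shape (α), per the docs above

            double byFormula = 1.0 - Math.Pow(xm / x, alpha);   // valid for x >= xm
            Console.WriteLine($"{byFormula} vs {pareto.CumulativeDistribution(x)}");
        }
    }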
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Pareto distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Poisson distribution. + + + Distribution is described at Wikipedia - Poisson distribution. + Knuth's method is used to generate Poisson distributed random variables. 
+ f(x) = exp(-λ)*λ^x/x!; + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + If is equal or less then 0.0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + If is equal or less then 0.0. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + + + + Gets the Poisson distribution parameter λ. Range: λ > 0. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Generates one sample from the Poisson distribution. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by Knuth's method. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by "Rejection method PA". + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. 
+ A random sample from the Poisson distribution. + "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson, + Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) + The article is on pages 29-35. The algorithm given here is on page 32. + + + + Samples a Poisson distributed random variable. + + A sample from the Poisson distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Poisson distributed random variables. + + a sequence of successes in N trials. + + + + Samples a Poisson distributed random variable. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Samples a Poisson distributed random variable. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Rayleigh distribution. + The Rayleigh distribution (pronounced /ˈreɪli/) is a continuous probability distribution. As an + example of how it arises, the wind speed will have a Rayleigh distribution if the components of + the two-dimensional wind velocity vector are uncorrelated and normally distributed with equal variance. + For details about this distribution, see + Wikipedia - Rayleigh distribution. + + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + If is negative. + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the scale (σ) of the distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. 
+ + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Rayleigh distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (σ) of the distribution. Range: σ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (σ) of the distribution. Range: σ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Stable distribution. + A random variable is said to be stable (or to have a stable distribution) if it has + the property that a linear combination of two independent copies of the variable has + the same distribution, up to location and scale parameters. 
+ For details about this distribution, see + Wikipedia - Stable distribution. + + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Gets the stability (α) of the distribution. Range: 2 ≥ α > 0. + + + + + Gets The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + + + + + Gets the scale (c) of the distribution. Range: c > 0. + + + + + Gets the location (μ) of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets he entropy of the distribution. + + Always throws a not supported exception. + + + + Gets the skewness of the distribution. + + Throws a not supported exception of Alpha != 2. + + + + Gets the mode of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the median of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + Throws a not supported exception if Alpha != 2, (Alpha != 1 and Beta !=0), or (Alpha != 0.5 and Beta != 1) + + + + Samples the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a random number from the distribution. + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Stable distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. 
+ The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Continuous Univariate Student's T-distribution. + Implements the univariate Student t-distribution. For details about this + distribution, see + + Wikipedia - Student's t-distribution. + + We use a slightly generalized version (compared to + Wikipedia) of the Student t-distribution. Namely, one which also + parameterizes the location and scale. See the book "Bayesian Data + Analysis" by Gelman et al. for more details. 
+ The density of the Student t-distribution p(x|mu,scale,dof) = + Gamma((dof+1)/2) (1 + (x - mu)^2 / (scale * scale * dof))^(-(dof+1)/2) / + (Gamma(dof/2)*Sqrt(dof*pi*scale)). + The distribution will use the by + default. Users can get/set the random number generator by using the + property. + The statistics classes will check all the incoming parameters + whether they are in the allowed range. This might involve heavy + computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the StudentT class. This is a Student t-distribution with location 0.0 + scale 1.0 and degrees of freedom 1. + + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Gets the location (μ) of the Student t-distribution. + + + + + Gets the scale (σ) of the Student t-distribution. Range: σ > 0. + + + + + Gets the degrees of freedom (ν) of the Student t-distribution. Range: ν > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Student t-distribution. + + + + + Gets the variance of the Student t-distribution. + + + + + Gets the standard deviation of the Student t-distribution. + + + + + Gets the entropy of the Student t-distribution. + + + + + Gets the skewness of the Student t-distribution. + + + + + Gets the mode of the Student t-distribution. + + + + + Gets the median of the Student t-distribution. + + + + + Gets the minimum of the Student t-distribution. + + + + + Gets the maximum of the Student t-distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Samples student-t distributed random variables. + + The algorithm is method 2 in section 5, chapter 9 + in L. 
Devroye's "Non-Uniform Random Variate Generation" + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a random number from the standard student-t distribution. + + + + Generates a sample from the Student t-distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Student t-distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Student t-distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Student t-distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The location (μ) of the distribution. 
+ The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . 
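As a quick orientation for how the Weibull members documented above are consumed from C#, here is a minimal sketch against the MathNet.Numerics.Distributions.Weibull class (the MathNet.Numerics 3.16 package referenced in Plankton.csproj); the shape and scale values are illustrative only:

    using System;
    using MathNet.Numerics.Distributions;

    class WeibullDemo
    {
        static void Main()
        {
            // Weibull with shape k = 1.5 and scale λ = 2.0 (both must be > 0).
            var weibull = new Weibull(1.5, 2.0);

            Console.WriteLine(weibull.Mean);                        // analytic mean of the distribution
            Console.WriteLine(weibull.Density(1.0));                // PDF at x = 1
            Console.WriteLine(weibull.CumulativeDistribution(1.0)); // CDF at x = 1

            // Static equivalent, without constructing a distribution object.
            Console.WriteLine(Weibull.PDF(1.5, 2.0, 1.0));

            // Draw a single random sample.
            Console.WriteLine(weibull.Sample());
        }
    }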
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. + + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. 
+ + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. 
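The Zipf members above follow the same pattern as the continuous distributions, only with an integer domain; a minimal sketch, assuming MathNet.Numerics.Distributions.Zipf with illustrative s and n values:

    using System;
    using MathNet.Numerics.Distributions;

    class ZipfDemo
    {
        static void Main()
        {
            // Zipf with exponent s = 1.1 over n = 100 ranks (illustrative values).
            var zipf = new Zipf(1.1, 100);

            Console.WriteLine(zipf.Probability(1));             // P(X = 1), the most frequent rank
            Console.WriteLine(zipf.CumulativeDistribution(10)); // P(X <= 10)
            Console.WriteLine(zipf.Mean);

            // Draw a few integer samples (ranks in 1..n).
            for (int i = 0; i < 5; i++)
            {
                Console.WriteLine(zipf.Sample());
            }
        }
    }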
+ + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. 
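The canonical-modulus versus remainder distinction documented above is the main subtlety in this group of helpers; a short sketch against the MathNet.Numerics.Euclid static class (the input values are chosen only for illustration):

    using System;
    using MathNet.Numerics;

    class EuclidDemo
    {
        static void Main()
        {
            // Canonical modulus takes the sign of the divisor,
            // while Remainder (like the C# % operator) takes the sign of the dividend.
            Console.WriteLine(Euclid.Modulus(-5, 3));    // 1
            Console.WriteLine(Euclid.Remainder(-5, 3));  // -2 (same as -5 % 3)

            Console.WriteLine(Euclid.IsEven(10));        // True
            Console.WriteLine(Euclid.IsOdd(7));          // True
            Console.WriteLine(Euclid.IsPowerOfTwo(64));  // True
        }
    }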
+ + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the greatest common divisor (gcd) of two big integers. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of big integers. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of big integers. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). 
+ Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two big integers. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of big integers. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of big integers. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. 
+ Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. 
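The generators documented above all live on the static MathNet.Numerics.Generate class; a minimal sketch of the sinusoidal and linearly spaced variants (length, sampling rate, frequency and amplitude are illustrative):

    using System;
    using MathNet.Numerics;

    class GenerateDemo
    {
        static void Main()
        {
            // 100 samples of a 5 Hz sine wave sampled at 1000 Hz with amplitude 2.0
            // (mean, phase and delay keep their defaults of zero).
            double[] sine = Generate.Sinusoidal(100, 1000.0, 5.0, 2.0);

            // 11 linearly spaced points in [0, 1], like MATLAB's linspace(0, 1, 11).
            double[] grid = Generate.LinearSpaced(11, 0.0, 1.0);

            Console.WriteLine(sine[0]); // 0 at t = 0 for zero phase and zero mean
            Console.WriteLine(grid[5]); // 0.5
        }
    }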
+ + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. + + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Generate a Fibonacci sequence, including zero as first value. + + + + + Generate an infinite Fibonacci sequence, including zero as first value. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. 
+ Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). 
+ The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. 
to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. 
+ + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). [= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. + + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. 
Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. + + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. 
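The Gauss-Legendre rule and the double exponential transformation documented above can both be driven through their static Integrate entry points; a minimal sketch, assuming MathNet.Numerics.Integration and an integrand with a known exact value:

    using System;
    using MathNet.Numerics.Integration;

    class IntegrationDemo
    {
        static void Main()
        {
            Func<double, double> f = Math.Sin;

            // 32nd-order Gauss-Legendre rule over [0, π]; the exact integral is 2.
            double gl = GaussLegendreRule.Integrate(f, 0.0, Math.PI, 32);

            // Double exponential transformation with a target relative error of 1e-8.
            double det = DoubleExponentialTransformation.Integrate(f, 0.0, Math.PI, 1e-8);

            // Both approximations should be very close to 2.
            Console.WriteLine("{0} {1}", gl, det);
        }
    }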
+ + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. + First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. + + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. 
+ In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. 
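As a usage sketch for the barycentric rational interpolation without poles described above (Floater/Hormann): the Interpolate.RationalWithoutPoles facade name below is an assumption; if it differs in your version, the Barycentric factory methods summarized above construct the same interpolant.

```csharp
using System;
using MathNet.Numerics;
using MathNet.Numerics.Interpolation;

class RationalInterpolationSketch
{
    static void Main()
    {
        double[] x = { 0.0, 1.0, 2.0, 3.0, 4.0 };
        double[] y = { 0.0, 1.0, 0.5, 0.2, 0.1 };

        // Barycentric rational interpolation without poles (Floater/Hormann).
        // Facade method name assumed; see the Barycentric factories above.
        IInterpolation fh = Interpolate.RationalWithoutPoles(x, y);

        Console.WriteLine(fh.Interpolate(2.5));

        // Per the summaries above, this scheme supports neither
        // differentiation nor integration.
        Console.WriteLine(fh.SupportsDifferentiation); // false
        Console.WriteLine(fh.SupportsIntegration);     // false
    }
}
```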
+ + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. + Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. 
+ + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + A step function where the start of each segment is included, and the last segment is open-ended. + Segment i is [x_i, x_i+1) for i < N, or [x_i, infinity] for i = N. + The domain of the function is all real numbers, such that y = 0 where x <. + + Supports both differentiation and integration. + + + Sample points (N), sorted ascending + Samples values (N) of each segment starting at the corresponding sample point. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t. + + + + + Wraps an interpolation with a transformation of the interpolated values. + + Neither differentiation nor integration is supported. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). 
+ + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. 
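The spline variants above share the same interpolation surface (Interpolate, Differentiate, Integrate). A short sketch with the natural and Akima cubic splines, assuming the CubicSpline.InterpolateNatural and CubicSpline.InterpolateAkima factories that correspond to the summaries above:

```csharp
using System;
using MathNet.Numerics.Interpolation;

class SplineSketch
{
    static void Main()
    {
        double[] x = { 0.0, 1.0, 2.0, 3.0, 4.0 };
        double[] y = { 0.0, 0.8, 0.9, 0.1, -0.8 };

        // Natural cubic spline: zero second derivative at both boundaries.
        CubicSpline natural = CubicSpline.InterpolateNatural(x, y);

        // Akima spline: robust to outliers, per the summary above.
        CubicSpline akima = CubicSpline.InterpolateAkima(x, y);

        // Cubic splines support both differentiation and integration.
        Console.WriteLine(natural.Interpolate(2.5));     // value x(t)
        Console.WriteLine(natural.Differentiate(2.5));   // first derivative
        Console.WriteLine(natural.Integrate(0.0, 4.0));  // definite integral
        Console.WriteLine(akima.Interpolate(2.5));
    }
}
```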
+ + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. + + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Left and right boundary conditions. + + + + + Natural Boundary (Zero second derivative). + + + + + Parabolically Terminated boundary. + + + + + Fixed first derivative at the boundary. + + + + + Fixed second derivative at the boundary. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. 
+ + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. 
+ This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. 
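The dense and sparse factory members above are reached through Matrix&lt;T&gt;.Build in Math.NET Numerics. A brief sketch; the builder method names follow the summaries above and should be checked against your library version.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class MatrixBuildSketch
{
    static void Main()
    {
        var M = Matrix<double>.Build;

        // Dense 3x3 matrix from an init function (row, column) => value.
        var a = M.Dense(3, 3, (i, j) => i == j ? 2.0 : 1.0);

        // Dense matrix copied from a 2D array (independent of the array).
        var b = M.DenseOfArray(new double[,] { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } });

        // Dense identity with a one-diagonal.
        var eye = M.DenseIdentity(3);

        // Sparse 1000x1000 matrix, all cells zero; set a few entries.
        var s = M.Sparse(1000, 1000);
        s[0, 0] = 4.0;
        s[999, 999] = 4.0;

        // Diagonal matrix with the diagonal copied from an array.
        var d = M.DiagonalOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });

        Console.WriteLine(a * b + eye + d);
    }
}
```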
+ + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. 
+ This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Existing data may not be all zeros, so clearing may be necessary + if not all of it will be overwritten anyway. + + + + + If existing data is assumed to be all zeros already, + clearing it may be skipped if applicable. + + + + + Allow skipping zero entries (without enforcing skipping them). + When enumerating sparse matrices this can significantly speed up operations. + + + + + Force applying the operation to all fields even if they are zero. + + + + + It is not known yet whether a matrix is symmetric or not. + + + + + A matrix is symmetric + + + + + A matrix is hermitian (conjugate symmetric). + + + + + A matrix is not symmetric + + + + + Stop criterion that delegates the status determination to a delegate. + + + + + Create a new instance of this criterion with a custom implementation. + + Custom implementation with the same signature and semantics as the DetermineStatus method. + + + + Determines the status of the iterative calculation by delegating it to the provided delegate. + Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + + + + Clones this criterion and its settings. + + + + + Defines an that uses a cancellation token as stop criterion. + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Iterative Calculation Status + + + + + An iterator that is used to check if an iterative calculation should continue or stop. + + + + + The collection that holds all the stop criteria and the flag indicating if they should be added + to the child iterators. 
+ + + + + The status of the iterator. + + + + + Initializes a new instance of the class with the default stop criteria. + + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Gets the current calculation status. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual iterators may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Indicates to the iterator that the iterative process has been cancelled. + + + Does not reset the stop-criteria. + + + + + Resets the to the pre-calculation state. + + + + + Creates a deep clone of the current iterator. + + The deep clone of the current iterator. + + + + Defines an that monitors the numbers of iteration + steps as stop criterion. + + + + + The default value for the maximum number of iterations the process is allowed + to perform. + + + + + The maximum number of iterations the calculation is allowed to perform. + + + + + The status of the calculation + + + + + Initializes a new instance of the class with the default maximum + number of iterations. + + + + + Initializes a new instance of the class with the specified maximum + number of iterations. + + The maximum number of iterations the calculation is allowed to perform. + + + + Gets or sets the maximum number of iterations the calculation is allowed to perform. + + Thrown if the Maximum is set to a negative value. + + + + Returns the maximum number of iterations to the default. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Loads the available objects from the specified assembly. + + The assembly which will be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The type in the assembly which should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. 
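A small sketch of assembling the iterator and stop criteria described above. The type names (Iterator&lt;T&gt;, IterationCountStopCriterion&lt;T&gt;, ResidualStopCriterion&lt;T&gt;) follow the Math.NET Numerics solver framework these comments document; the residual criterion itself is detailed a little further below in this section.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Solvers;

class IteratorSketch
{
    static void Main()
    {
        // Only one stop criterion of each type may be passed in, per the remarks above.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),  // hard cap on iterations
            new ResidualStopCriterion<double>(1e-10));      // converged once the residual is small

        // An iterative solver calls DetermineStatus after each step; afterwards
        // Status reports whether the run converged, diverged or hit the cap.
        Console.WriteLine(iterator.Status);
    }
}
```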
+ + + + Loads the available objects from the specified assembly. + + The of the assembly that should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + + + + Defines the interface for classes that solve the matrix equation Ax = b in + an iterative manner. + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Defines the interface for objects that can create an iterative solver with + specific settings. This interface is used to pass iterative solver creation + setup information around. + + + + + Gets the type of the solver that will be created by this setup object. + + + + + Gets type of preconditioner, if any, that will be created by this setup object. + + + + + Creates the iterative solver to be used. + + + + + Creates the preconditioner to be used by default (can be overwritten). + + + + + Gets the relative speed of the solver. + + Returns a value between 0 and 1, inclusive. + + + + Gets the relative reliability of the solver. + + Returns a value between 0 and 1 inclusive. + + + + The base interface for preconditioner classes. + + + + Preconditioners are used by iterative solvers to improve the convergence + speed of the solving process. Increase in convergence speed + is related to the number of iterations necessary to get a converged solution. + So while in general the use of a preconditioner means that the iterative + solver will perform fewer iterations it does not guarantee that the actual + solution time decreases given that some preconditioners can be expensive to + setup and run. + + + Note that in general changes to the matrix will invalidate the preconditioner + if the changes occur after creating the preconditioner. + + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix on which the preconditioner is based. + + + + Approximates the solution to the matrix equation Mx = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + The base interface for classes that provide stop criteria for iterative calculations. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current IIterationStopCriterion. Status is set to Status field of current object. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + is not a legal value. Status should be set in implementation. + + + + Resets the IIterationStopCriterion to the pre-calculation state. 
+ + To implementers: Invoking this method should not clear the user defined + property values, only the state that is used to track the progress of the + calculation. + + + + Monitors an iterative calculation for signs of divergence. + + + + + The maximum relative increase the residual may experience without triggering a divergence warning. + + + + + The number of iterations over which a residual increase should be tracked before issuing a divergence warning. + + + + + The status of the calculation + + + + + The array that holds the tracking information. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified maximum + relative increase and the specified minimum number of tracking iterations. + + The maximum relative increase that the residual may experience before a divergence warning is issued. + The minimum number of iterations over which the residual must grow before a divergence warning is issued. + + + + Gets or sets the maximum relative increase that the residual may experience before a divergence warning is issued. + + Thrown if the Maximum is set to zero or below. + + + + Gets or sets the minimum number of iterations over which the residual must grow before + issuing a divergence warning. + + Thrown if the value is set to less than one. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Detect if solution is diverging + + true if diverging, otherwise false + + + + Gets required history Length + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Defines an that monitors residuals for NaN's. + + + + + The status of the calculation + + + + + The iteration number of the last iteration. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + A unit preconditioner. This preconditioner does not actually do anything + it is only used when running an without + a preconditioner. + + + + + The coefficient matrix on which this preconditioner operates. + Is used to check dimensions on the different vectors that are processed. + + + + + Initializes the preconditioner and loads the internal data structures. + + + The matrix upon which the preconditioner is based. + + If is not a square matrix. 
+ + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + If and do not have the same size. + + + - or - + + + If the size of is different the number of rows of the coefficient matrix. + + + + + + Defines an that monitors residuals as stop criterion. + + + + + The maximum value for the residual below which the calculation is considered converged. + + + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + The status of the calculation + + + + + The number of iterations since the residuals got below the maximum. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified + maximum residual and minimum number of iterations. + + + The maximum value for the residual below which the calculation is considered converged. + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + Gets or sets the maximum value for the residual below which the calculation is considered + converged. + + Thrown if the Maximum is set to a negative value. + + + + Gets or sets the minimum number of iterations for which the residual has to be + below the maximum before the calculation is considered converged. + + Thrown if the BelowMaximumFor is set to a value less than 1. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. 
+ Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Converts a matrix to single precision. + + + + + Converts a matrix to double precision. + + + + + Converts a matrix to single precision complex numbers. + + + + + Converts a matrix to double precision complex numbers. + + + + + Gets a single precision complex matrix with the real parts from the given matrix. + + + + + Gets a double precision complex matrix with the real parts from the given matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Converts a vector to single precision. + + + + + Converts a vector to double precision. 
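The vector factories above mirror the matrix ones and are reached through Vector&lt;T&gt;.Build. A brief sketch, with method names taken from the summaries above:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class VectorBuildSketch
{
    static void Main()
    {
        var V = Vector<double>.Build;

        // Dense vector of length 5, each value from an init function.
        var x = V.Dense(5, i => i * i);

        // Dense vector copied from an array (independent of the array).
        var y = V.DenseOfArray(new[] { 1.0, 2.0, 3.0, 4.0, 5.0 });

        // Sparse vector of length 1000 with a single non-zero entry.
        var s = V.Sparse(1000);
        s[42] = 1.0;

        Console.WriteLine(x + y);
    }
}
```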
+ + + + + Converts a vector to single precision complex numbers. + + + + + Converts a vector to double precision complex numbers. + + + + + Gets a single precision complex vector with the real parts from the given vector. + + + + + Gets a double precision complex vector with the real parts from the given vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). + Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
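+ The referenced example is not shipped inline in this file, so the following is a minimal
+ sketch only, assuming MathNet.Numerics 3.x: it builds a small non-symmetric system and calls
+ the Solve overload documented below (coefficient matrix A, right-hand side b, result x,
+ iterator, preconditioner). The stop-criterion values are illustrative, and
+ DiagonalPreconditioner is just one possible IPreconditioner&lt;double&gt;.
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ // Small non-symmetric system A*x = b.
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 4.0, 1.0, 0.0 },
+     { 2.0, 5.0, 1.0 },
+     { 0.0, 1.0, 3.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ // Stop after 1000 iterations or once the residual drops below 1e-10, whichever comes first.
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ new BiCgStab().Solve(A, b, x, iterator, new DiagonalPreconditioner());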
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
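+ Since no example ships in this file, here is a hedged sketch assuming MathNet.Numerics 3.x.
+ The SolveIterative call is assumed to be the convenience overload on Matrix&lt;double&gt; that
+ allocates and returns the result vector; if it is not available, the Solve signature shown
+ for BiCgStab above works the same way with a GpBiCg instance.
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 5.0, 2.0 }, { 1.0, 4.0 } });
+ var b = Vector<double>.Build.Dense(new[] { 3.0, 1.0 });
+
+ // Stop on iteration count or residual norm, whichever is hit first.
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(500),
+     new ResidualStopCriterion<double>(1e-8));
+
+ // Assumed convenience overload: returns x such that A*x is approximately b.
+ var x = A.SolveIterative(b, new GpBiCg(), iterator, new DiagonalPreconditioner());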
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
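+ Again a hedged usage sketch, not original content, assuming MathNet.Numerics 3.x. The
+ NumberOfStartingVectors property name is an assumption based on the member summaries below;
+ the documented constraint is that it must be larger than 1 and smaller than the number of
+ variables, so 2 is used for this 3-variable system.
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 6.0, 1.0, 0.0 },
+     { 2.0, 5.0, 1.0 },
+     { 0.0, 1.0, 4.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 0.0, 1.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ var solver = new MlkBiCgStab();
+ solver.NumberOfStartingVectors = 2;   // assumed property name, see the member summaries below
+ solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());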
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
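+ As with the other solvers, the example code itself is not included in this file; the sketch
+ below is an addition that assumes MathNet.Numerics 3.x and reuses the Solve signature
+ documented underneath (matrix A, right-hand side b, result x, iterator, preconditioner).
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4.0, 1.0 }, { 2.0, 3.0 } });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(500),
+     new ResidualStopCriterion<double>(1e-10));
+
+ new TFQMR().Solve(A, b, x, iterator, new DiagonalPreconditioner());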
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
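+ A hedged sketch of how this preconditioner might be plugged into one of the iterative solvers
+ above. The class name ILUTPPreconditioner, its parameterless constructor and the FillLevel,
+ DropTolerance and PivotTolerance property names are assumptions inferred from the member
+ summaries below (the values shown are the documented defaults); verify them against the
+ MathNet.Numerics 3.x API before relying on this.
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 4.0, 1.0, 0.0 },
+     { 1.0, 5.0, 2.0 },
+     { 0.0, 2.0, 6.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 1.0, 1.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ // Assumed class and property names; values match the defaults described below.
+ var ilutp = new ILUTPPreconditioner { FillLevel = 200, DropTolerance = 0.0001, PivotTolerance = 0.0 };
+
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+ new BiCgStab().Solve(A, b, x, iterator, ilutp);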
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
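+ A short, hedged sketch only: the class name ILU0Preconditioner and its parameterless
+ constructor are assumptions based on the summary above, and the Solve call follows the same
+ documented signature as the iterative solvers earlier in this file.
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 3.0, 1.0 }, { 1.0, 2.0 } });
+ var b = Vector<double>.Build.Dense(new[] { 5.0, 5.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(200),
+     new ResidualStopCriterion<double>(1e-12));
+ new BiCgStab().Solve(A, b, x, iterator, new ILU0Preconditioner());   // assumed class name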
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + Matrix V is encoded in the property EigenVectors in the way that: + - column corresponding to real eigenvalue represents real eigenvector, + - columns corresponding to the pair of complex conjugate eigenvalues + lambda[i] and lambda[i+1] encode real and imaginary parts of eigenvectors. + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). 
+ + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. 
+ If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. 
+ + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. 
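+ A brief sketch (added here, not part of the original documentation) tying the dense
+ factorizations above together; it relies on the standard MathNet.Numerics 3.x factorization
+ methods exposed on Matrix&lt;double&gt;, so only the sample values are made up.
+
+ using MathNet.Numerics.LinearAlgebra;
+
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 4.0, 1.0 },
+     { 1.0, 3.0 }   // symmetric positive definite, so Cholesky applies
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });
+
+ var xChol = A.Cholesky().Solve(b);   // A = L*L'
+ var xLu   = A.LU().Solve(b);         // P*A = L*U, with pivoting
+ var xQr   = A.QR().Solve(b);         // A = Q*R (Householder)
+ var xSvd  = A.Svd().Solve(b);        // A = U*S*VT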
+ + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. 
+ + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. 
Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. 
+ + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. 
+ + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. 
+ + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + double version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. 
+ + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. 
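Most of the operations listed here come in a form that writes into a caller-supplied result object of matching dimensions, which avoids repeated allocation in tight loops. A hedged sketch of that pattern (method names as shipped in MathNet.Numerics 3.x):

    var A = Matrix<double>.Build.DenseOfArray(new[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });
    var B = Matrix<double>.Build.DenseOfArray(new[,] { { 5.0, 6.0 }, { 7.0, 8.0 } });
    var result = Matrix<double>.Build.Dense(2, 2);

    A.PointwiseMultiply(B, result);        // result[i,j] = A[i,j] * B[i,j]
    A.Multiply(2.0, result);               // result = 2 * A, reusing the same buffer
    A.TransposeThisAndMultiply(B, result); // result = transpose(A) * B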
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. 
+ The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . 
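For the CSR-backed sparse matrix the usual route is to build from an indexed list of non-zero entries and then use the operator overloads documented above; as the remarks note, a mixed sparse/dense operation takes the representation of the denser operand. A small sketch, assuming the 3.x SparseOfIndexed builder method and an IsSymmetric() member (names not confirmed by this diff):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    // Only non-zero cells are stored; everything else is implicitly zero.
    var S = Matrix<double>.Build.SparseOfIndexed(3, 3, new[]
    {
        Tuple.Create(0, 0, 4.0),
        Tuple.Create(1, 1, 4.0),
        Tuple.Create(2, 2, 4.0),
        Tuple.Create(0, 1, -1.0),
        Tuple.Create(1, 0, -1.0),
    });

    var x = Vector<double>.Build.Dense(3, 1.0);
    var y = S * x;              // sparse matrix-vector product
    var T = S + S;              // new matrix, same sparsity pattern
    var scaled = 2.0 * S;       // scalar scaling
    bool sym = S.IsSymmetric(); // true for this matrix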
+ + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + double version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. 
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. 
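The norm and dot-product members above read naturally in code; a short sketch with the member names as they appear in MathNet.Numerics 3.x:

    var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0 });

    double dot = v.DotProduct(v);   // 25
    double l1  = v.L1Norm();        // 7  (Manhattan norm)
    double l2  = v.L2Norm();        // 5  (Euclidean norm)
    double inf = v.InfinityNorm();  // 4  (maximum absolute value)
    var unit   = v.Normalize(2.0);  // (0.6, -0.8): unit length in the 2-norm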
+ + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . 
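The constructor described above that binds directly to a raw array is the zero-copy option: vector and array share storage, so writes through either are visible through the other. A hedged sketch, assuming Vector<double>.Build.Dense(double[]) is the binding overload in 3.x:

    var storage = new[] { 1.0, 2.0, 3.0 };
    var v = Vector<double>.Build.Dense(storage);  // no copy, shared storage

    storage[0] = 10.0;
    // v[0] now reads 10.0 as well; use DenseVector.OfArray(...) instead when an
    // independent copy is wanted.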
+ + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use, + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a double dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. 
+ + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. 
+ A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. 
+ + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). + Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
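The remarks above point at example code that is not reproduced in this documentation text, so here is a minimal hedged sketch of driving BiCgStab through the Solve overload documented just below; the stop-criterion and preconditioner type names are the ones shipped with MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    // Small non-symmetric system A x = b.
    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 4.0, 1.0, 0.0 },
        { 2.0, 5.0, 1.0 },
        { 0.0, 1.0, 3.0 },
    });
    var b = Vector<double>.Build.Dense(3, 1.0);
    var x = Vector<double>.Build.Dense(3);

    // Stop after 1000 iterations or once the residual falls below 1e-10.
    var iterator = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10));

    new BiCgStab().Solve(A, b, x, iterator, new DiagonalPreconditioner());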
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmicka, P. Raghavan a,*, L. McInnes b, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
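Putting the three tuning knobs described above together, a configuration sketch follows; the ILUTPPreconditioner class name and the (fill level, drop tolerance, pivot tolerance) constructor argument order are assumptions inferred from these descriptions rather than confirmed by the diff:

    var A = Matrix<double>.Build.SparseOfArray(new double[,] { { 4, -1 }, { -1, 4 } });
    var b = Vector<double>.Build.Dense(2, 1.0);
    var x = Vector<double>.Build.Dense(2);

    // Fill level 200 (the documented default), drop entries below 1e-4 in
    // absolute value, pivot tolerance 0 (no pivoting).
    var ilutp = new ILUTPPreconditioner(200.0, 1e-4, 0.0);

    var iterator = new Iterator<double>(
        new IterationCountStopCriterion<double>(500),
        new ResidualStopCriterion<double>(1e-8));
    new BiCgStab().Solve(A, b, x, iterator, ilutp);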
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
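The ILU(0) preconditioner introduced here is the no-fill variant of the above: it keeps the sparsity pattern of A, stores L and U in one combined matrix, and therefore needs no fill, drop or pivot settings. A minimal sketch, assuming the MathNet.Numerics 3.x class name ILU0Preconditioner; the Initialize/Approximate members it relies on are the ones listed below.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers; // assumed location of ILU0Preconditioner

    var ilu0 = new ILU0Preconditioner(); // class name assumed (MathNet.Numerics 3.x)

    Matrix<double> A = SparseMatrix.OfArray(new double[,]
    {
        {  4, -1,  0 },
        { -1,  4, -1 },
        {  0, -1,  4 }
    });
    ilu0.Initialize(A); // factors A into the combined L/U storage

    Vector<double> r = DenseVector.OfArray(new[] { 1.0, 0.0, 1.0 });
    Vector<double> z = new DenseVector(r.Count);
    ilu0.Approximate(r, z); // applies the preconditioner: solves (L*U)*z = r as an approximation of A*z = r

In practice the preconditioner would normally be handed to one of the package's Krylov solvers (for example BiCgStab) rather than called directly; that wiring is version dependent and not shown here.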
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. 
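For orientation, here are a few of the dense creation routes listed above, written against the static creators of MathNet.Numerics 3.x (DenseMatrix.OfArray, OfColumnArrays, Create); treat the exact method names as an assumption if a different package version is referenced.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    // Independent copy of a two-dimensional array.
    Matrix<double> a = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });

    // Built column by column from arrays.
    Matrix<double> b = DenseMatrix.OfColumnArrays(new[] { 1.0, 3.0 }, new[] { 2.0, 4.0 });

    // 3x3 matrix of zeros via the row/column constructor documented above.
    Matrix<double> z = new DenseMatrix(3, 3);

    // Each value initialized from an index-based function (identity here).
    Matrix<double> id = DenseMatrix.Create(3, 3, (i, j) => i == j ? 1.0 : 0.0);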
+ + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. 
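A short sketch of the norm and arithmetic members documented above; the instance method names (L1Norm, InfinityNorm, FrobeniusNorm, Trace, PointwiseMultiply) and the operator overloads match MathNet.Numerics 3.x and are assumed here.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    Matrix<double> m = DenseMatrix.OfArray(new double[,] { { 2, 1 }, { 1, 3 } });
    Vector<double> v = DenseVector.OfArray(new[] { 1.0, 2.0 });

    double l1   = m.L1Norm();         // maximum absolute column sum
    double linf = m.InfinityNorm();   // maximum absolute row sum
    double fro  = m.FrobeniusNorm();  // square root of the sum of squared entries
    double tr   = m.Trace();          // sum of the diagonal (square matrices only)

    Matrix<double> sum  = m + m;                  // element-wise addition
    Vector<double> mv   = m * v;                  // matrix-vector product
    Matrix<double> hada = m.PointwiseMultiply(m); // entry-wise (Hadamard) product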
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. 
+ The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. 
+ The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a float dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. 
+ + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. 
+ The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. 
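As a usage note for the Cholesky factorization just described (A = L*L' for a symmetric, positive definite A): the factorization is computed once at construction, after which the determinant and, in the concrete classes documented further below, the Solve methods reuse the cached factor. The matrix.Cholesky() entry point and the Determinant property follow MathNet.Numerics 3.x and are assumed here.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    // A symmetric, positive definite matrix.
    Matrix<double> a = DenseMatrix.OfArray(new double[,]
    {
        { 4, 1, 0 },
        { 1, 3, 1 },
        { 0, 1, 2 }
    });

    var cholesky = a.Cholesky();           // throws if a is not symmetric positive definite
    double det = cholesky.Determinant;     // determinant of a, read off the factor

    Vector<double> b = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });
    Vector<double> x = cholesky.Solve(b);  // solves a*x = b with the cached factor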
+ + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. 
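The dense LU, QR and eigenvalue factorizations documented above all follow the same pattern: the decomposition is computed at construction time and cached, and Solve (or Inverse, EigenValues, and so on) then reuses it. A compact sketch using the MathNet.Numerics 3.x entry points LU(), QR() and Evd(), which are assumed here.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    Matrix<double> a = DenseMatrix.OfArray(new double[,] { { 3, 1 }, { 1, 2 } });
    Vector<double> b = DenseVector.OfArray(new[] { 9.0, 8.0 });

    var lu = a.LU();                    // P*A = L*U with pivoting
    Vector<double> x1 = lu.Solve(b);    // solve A*x = b
    Matrix<double> inv = lu.Inverse();  // inverse computed from the LU factors

    var qr = a.QR();                    // A = Q*R (Householder)
    Vector<double> x2 = qr.Solve(b);

    var evd = a.Evd();                  // A = V*D*V' for this symmetric A
    var eigenvalues = evd.EigenValues;  // eigenvalues as a complex vector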
+ + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. 
+ + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. 
This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. 
+ + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. 
+ If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . 
+ If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. 
+ The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. 
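The stripped documentation above describes the sparse matrix type (3-array CSR storage, only non-zero cells stored) and its row/column constructor. The sketch below is a minimal illustration of that usage; the `MathNet.Numerics.LinearAlgebra.Single` namespace and the `NonZerosCount` property name are assumptions based on the Math.NET Numerics 3.x API, since the member names were lost when the XML tags were stripped.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Single;   // assumed namespace for the float types documented here

class SparseMatrixSketch
{
    static void Main()
    {
        // Documented constructor: rows and columns, every cell initialised to zero.
        var m = new SparseMatrix(5, 5);

        // Only explicitly set non-zero cells occupy space in the CSR arrays.
        m[0, 0] = 4.0f;
        m[2, 3] = 1.5f;
        m[4, 4] = -2.0f;

        // "Number of non zero elements" from the entry above; the property name is assumed.
        Console.WriteLine(m.NonZerosCount);   // expected: 3
    }
}
```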
+ + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
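The lower/upper triangle helpers documented above split a matrix around its diagonal. A small sketch, assuming the usual Math.NET method names `UpperTriangle` and `StrictlyLowerTriangle` (again, the member names are not visible in this stripped file):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Single;   // assumed namespace

class TriangleSketch
{
    static void Main()
    {
        var a = SparseMatrix.OfArray(new float[,]
        {
            { 1, 2, 3 },
            { 4, 5, 6 },
            { 7, 8, 9 }
        });

        var upper = a.UpperTriangle();                  // keeps the diagonal
        var strictlyLower = a.StrictlyLowerTriangle();  // drops the diagonal, per the entry above

        // The two pieces recombine to the original matrix.
        Console.WriteLine((upper + strictlyLower).Equals(a));   // expected: True
    }
}
```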
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . 
+ + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
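The sparse vector documentation above warns that adding a non-zero scalar produces a 100% filled vector. A brief sketch of that point, assuming the `NonZerosCount` property name and the scalar `Add` overload from Math.NET Numerics 3.x:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Single;   // assumed namespace

class SparseVectorSketch
{
    static void Main()
    {
        // Documented constructor: a zero vector of the given length.
        var v = new SparseVector(10);
        v[1] = 2.0f;
        v[7] = -3.5f;
        Console.WriteLine(v.NonZerosCount);   // expected: 2 (property name assumed)

        // Per the warning above, adding a non-zero scalar touches every cell, so the
        // result is effectively dense; a DenseVector is the better home for it.
        var shifted = v.Add(1.0f);
        Console.WriteLine(shifted[0]);        // expected: 1
    }
}
```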
+ + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. 
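The norm entries above (L1/Manhattan, infinity and the general p-norm) can be exercised as follows; the method names `L1Norm`, `L2Norm`, `InfinityNorm` and `Norm(p)` follow the standard Math.NET Numerics vector API and are assumptions here:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Single;   // assumed namespace

class NormSketch
{
    static void Main()
    {
        var v = new SparseVector(4);
        v[0] = 3f;
        v[1] = -4f;

        Console.WriteLine(v.L1Norm());        // 7 : sum of absolute values (Manhattan norm)
        Console.WriteLine(v.L2Norm());        // 5 : Euclidean norm
        Console.WriteLine(v.InfinityNorm());  // 4 : maximum absolute value
        Console.WriteLine(v.Norm(3.0));       // general p-norm, (sum |x_i|^p)^(1/p)
    }
}
```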
+ + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. 
+ The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. 
+ + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). + Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
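The remarks above refer to example code that is not present in this stripped documentation. The sketch below is a hedged reconstruction of typical usage, grounded in the documented Solve(matrix, input, result, iterator, preconditioner) signature; the concrete names (`BiCgStab`, `Iterator`, `IterationCountStopCriterion`, `ResidualStopCriterion`, `DiagonalPreconditioner` and the `Single` namespaces) are assumptions based on Math.NET Numerics 3.x.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Single;            // assumed float namespace
using MathNet.Numerics.LinearAlgebra.Single.Solvers;    // assumed solver namespace
using MathNet.Numerics.LinearAlgebra.Solvers;           // Iterator and stop criteria

class BiCgStabSketch
{
    static void Main()
    {
        // A small, diagonally dominant (and therefore well behaved) system Ax = b.
        var a = SparseMatrix.OfArray(new float[,]
        {
            { 4, 1, 0 },
            { 1, 4, 1 },
            { 0, 1, 4 }
        });
        var b = DenseVector.OfArray(new float[] { 1, 2, 3 });
        var x = new DenseVector(3);

        // The iterator decides when to stop: an iteration cap plus a residual tolerance.
        var iterator = new Iterator<float>(
            new IterationCountStopCriterion<float>(1000),
            new ResidualStopCriterion<float>(1e-6));

        // Documented signature: Solve(matrix, input, result, iterator, preconditioner).
        var solver = new BiCgStab();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        Console.WriteLine(x);
    }
}
```

As the remarks note, the choice of preconditioner matters much more than the solver settings themselves; the diagonal preconditioner here is only a placeholder.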
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
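For the GPBiCG solver, the two switch settings (BiCgStab steps before switching to GPBiCG, and GPBiCG steps before switching back) are documented above, but their member names are not visible in this stripped file; the sketch below assumes the Math.NET property names `NumberOfBiCgStabSteps` and `NumberOfGpBiCgSteps`.

```csharp
using MathNet.Numerics.LinearAlgebra.Single.Solvers;   // assumed namespace

class GpBiCgSketch
{
    static void Main()
    {
        var solver = new GpBiCg();

        // The two switch settings documented above; both property names are assumptions.
        solver.NumberOfBiCgStabSteps = 32;   // BiCgStab iterations before switching to GPBiCG
        solver.NumberOfGpBiCgSteps = 8;      // GPBiCG iterations before switching back

        // The solver is then handed to Solve(matrix, input, result, iterator, preconditioner)
        // exactly as in the BiCgStab sketch earlier in this file.
    }
}
```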
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
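The ML(k)-BiCGStab documentation above constrains the number of starting vectors to be larger than 1 and smaller than the number of variables in the matrix. A hedged sketch, assuming the property name `NumberOfStartingVectors` and the reset method `ResetNumberOfStartingVectors`:

```csharp
using MathNet.Numerics.LinearAlgebra.Single.Solvers;   // assumed namespace

class MlkBiCgStabSketch
{
    static void Main()
    {
        var solver = new MlkBiCgStab();

        // Documented constraint: larger than 1 and smaller than the number of variables
        // in the matrix being solved. Property and method names are assumptions.
        solver.NumberOfStartingVectors = 5;
        solver.ResetNumberOfStartingVectors();   // back to the documented default

        // Usage is otherwise identical to the other Krylov solvers in this file.
    }
}
```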
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
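TFQMR is driven exactly like the other solvers in this section. The sketch below uses the `SolveIterative` convenience extension from Math.NET Numerics 3.x, which is an assumption here since this documentation only shows the explicit Solve overload:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;                   // SolveIterative extension (assumed)
using MathNet.Numerics.LinearAlgebra.Single;            // assumed float namespace
using MathNet.Numerics.LinearAlgebra.Single.Solvers;    // assumed solver namespace
using MathNet.Numerics.LinearAlgebra.Solvers;

class TfqmrSketch
{
    static void Main()
    {
        var a = SparseMatrix.OfArray(new float[,]
        {
            { 5, 2, 0 },
            { 2, 5, 1 },
            { 0, 1, 5 }
        });
        var b = DenseVector.OfArray(new float[] { 7, 8, 6 });

        // SolveIterative allocates the result vector and builds the iterator from the
        // supplied stop criteria.
        var x = a.SolveIterative(b, new TFQMR(),
            new IterationCountStopCriterion<float>(1000),
            new ResidualStopCriterion<float>(1e-6));

        Console.WriteLine(x);
    }
}
```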
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
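The ILUTP-Mem settings above (fill level, drop tolerance, pivot tolerance, with documented defaults of 200, 0.0001 and 0.0) are supplied at construction time. A minimal sketch, assuming the class name `ILUTPPreconditioner`:

```csharp
using MathNet.Numerics.LinearAlgebra.Single.Solvers;   // assumed namespace

class IlutpSketch
{
    static void Main()
    {
        // Default settings documented above: fill level 200, drop tolerance 0.0001,
        // pivot tolerance 0.0 (no pivoting). A second, documented constructor takes the
        // three settings explicitly; the class name ILUTPPreconditioner is an assumption.
        var precond = new ILUTPPreconditioner();

        // As the remarks above state, changing fill level, drop tolerance or pivot
        // tolerance after Initialize(...) invalidates the preconditioner and requires
        // re-initialization before it is used again.
    }
}
```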
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
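The ILU(0) preconditioner follows the same Initialize/Approximate workflow documented for the other preconditioners in this file. A hedged sketch, assuming the class name `ILU0Preconditioner` and the `Single` namespaces:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Single;            // assumed float namespace
using MathNet.Numerics.LinearAlgebra.Single.Solvers;    // assumed solver namespace

class Ilu0Sketch
{
    static void Main()
    {
        var a = SparseMatrix.OfArray(new float[,]
        {
            { 4, 1, 0 },
            { 1, 4, 1 },
            { 0, 1, 4 }
        });

        // Documented workflow: Initialize with a square matrix, then Approximate maps a
        // right-hand side onto an approximate solution. The class name is an assumption.
        var precond = new ILU0Preconditioner();
        precond.Initialize(a);

        var b = DenseVector.OfArray(new float[] { 1, 2, 3 });
        var z = new DenseVector(3);
        precond.Approximate(b, z);   // z is roughly A^-1 * b, as consumed inside a Krylov solver

        Console.WriteLine(z);
    }
}
```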
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. 
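Earlier in this block the dense matrix documents a constructor that binds directly to a raw column-major array without copying, so the array and the matrix alias each other. A sketch of that behaviour, assuming the `MathNet.Numerics.LinearAlgebra.Complex` namespace (the surrounding entries mention complex conjugation and Complex parsing) and a (rows, columns, storage) constructor:

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra.Complex;   // assumed namespace for this part of the file

class DenseMatrixSketch
{
    static void Main()
    {
        // Column-major backing array: entries 0-1 form column 0, entries 2-3 form column 1.
        var storage = new[]
        {
            new Complex(1, 0), new Complex(2, 0),
            new Complex(3, 0), new Complex(4, 0)
        };

        // Documented behaviour: binds directly to the raw array, no copy is made.
        var m = new DenseMatrix(2, 2, storage);

        storage[0] = new Complex(-1, 0);   // the matrix observes the change...
        m[1, 1] = new Complex(10, 0);      // ...and writes through the matrix land in the array

        Console.WriteLine(m[0, 0]);        // (-1, 0)
        Console.WriteLine(storage[3]);     // (10, 0)
    }
}
```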
+ + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. 
+ The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. 
DenseVector continued: copying constructors that build an independent vector from another vector, an array, an enumerable, an indexed enumerable (omitted keys default to zero), a constant value, an init function or a random distribution; access to the underlying data array and helpers that return or bind a DenseVector directly to an existing array; scalar and vector addition and subtraction with the matching operator overloads; negation and conjugation into a target vector; scalar multiplication and division by a Complex from either side; the dot product, the conjugated dot product (sum of conj(a[i])*b[i]) and the * operator; the indices of the absolute minimum and maximum elements; the element sum; the L1 (Manhattan), L2 (Euclidean), infinity and p-norms; pointwise division and pointwise power; and Parse/TryParse methods that read a complex dense vector from strings of the form 'n', 'n;n;..', '(n;n;..)' or '[n;n;...]', optionally with culture-specific formatting.
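A similar sketch for the dense vector members listed above; again this is illustrative usage of the Math.NET Numerics API rather than code from this repository.

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class ComplexDenseVectorDemo
{
    static void Main()
    {
        var u = Vector<Complex>.Build.DenseOfArray(new[]
        {
            new Complex(1, 1), new Complex(0, -2), new Complex(3, 0)
        });
        var v = Vector<Complex>.Build.Dense(3, i => new Complex(i + 1, 0)); // (1, 2, 3)

        Console.WriteLine(u + v);                      // element-wise addition
        Console.WriteLine(u.DotProduct(v));            // sum of u[i] * v[i]
        Console.WriteLine(u.ConjugateDotProduct(v));   // sum of conj(u[i]) * v[i]
        Console.WriteLine(u.L1Norm());                 // sum of absolute values
        Console.WriteLine(u.L2Norm());                 // Euclidean norm
        Console.WriteLine(u.InfinityNorm());           // largest absolute value
        Console.WriteLine(u.AbsoluteMaximumIndex());   // index of that largest value
    }
}
```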
DiagonalMatrix (complex): a matrix type whose diagonal starts at element (0,0) and whose off-diagonal entries must stay zero (assigning 0.0 or NaN off the diagonal is ignored; any other off-diagonal assignment throws). It provides a constructor that binds directly to a storage instance or to a raw array of diagonal elements; factories for independent square or rectangular zero matrices, a constant-valued diagonal, copies of another diagonal matrix, a two-dimensional array, an (indexed) enumerable or an init function, the identity matrix, and a randomly sampled diagonal; element-wise negation and conjugation; matrix addition and subtraction; scalar multiplication and division in both directions; multiplication with vectors and matrices, including transpose and conjugate-transpose variants; the determinant; reading the diagonal into a vector and writing it back from an array or vector (Min(Rows, Columns) elements for non-square matrices); the induced L1, L2 and infinity norms, the Frobenius norm and the condition number; the inverse (square, non-singular matrices only); and the lower-triangle extraction methods.
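The diagonal storage described above can be exercised as follows; this is only a sketch, and the DiagonalOfDiagonalArray builder name is assumed from the 3.x MatrixBuilder API.

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class DiagonalMatrixDemo
{
    static void Main()
    {
        // Only the diagonal entries are stored; off-diagonal writes other
        // than 0.0 (or NaN) would throw for this storage type.
        var d = Matrix<Complex>.Build.DiagonalOfDiagonalArray(new[]
        {
            new Complex(2, 0), new Complex(0, 3), new Complex(-1, 1)
        });

        Console.WriteLine(d.Determinant());   // product of the diagonal entries
        Console.WriteLine(d.Diagonal());      // the diagonal as a vector
        Console.WriteLine(d.Inverse());       // also diagonal: reciprocal entries
        Console.WriteLine(d.L1Norm());        // maximum absolute column sum
    }
}
```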
DiagonalMatrix continued: strictly lower, upper and strictly upper triangle extraction; sub-matrix extraction with the usual index and length checks; column and row permutation, which always throws because permuting a diagonal matrix is not meaningful; and the IsSymmetric and IsHermitian checks.

Factorization classes: Cholesky computes, at construction time, a lower triangular L with A = L*L' for a symmetric positive definite A (otherwise the constructor throws) and exposes the determinant and log determinant of the factored matrix. Evd is the eigenvalue decomposition A = V*D*V', with D block diagonal (2-by-2 blocks for complex eigenvalue pairs) when A is not symmetric; it exposes the absolute value of the determinant, the effective numerical rank and an IsFullRank flag, and notes that V may be badly conditioned or even singular. GramSchmidt performs the QR decomposition A = QR by modified Gram-Schmidt orthogonalization and exposes the absolute determinant and IsFullRank. LU stores L, U and the pivot permutation P with P*A = L*U and exposes the determinant. QR uses Householder transformations and produces either a full (m-by-m Q, m-by-n R) or thin (m-by-n Q, n-by-n R) factorization, exposing the absolute determinant and IsFullRank. Svd computes M = U*Σ*Vᵀ with the singular values on the diagonal of Σ ordered descending, exposing the rank, the 2-norm, the condition number max(S)/min(S) and the determinant. A complex GramSchmidt variant decomposes a square complex matrix as A = QR with a unitary Q.
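A hedged sketch of how the factorization classes summarized above are reached in practice: each is obtained from a matrix instance, computed once at construction, and then queried through Solve and the listed properties. The matrix here is chosen to be Hermitian positive definite so that Cholesky applies; the values are illustrative only.

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class FactorizationDemo
{
    static void Main()
    {
        // Hermitian positive definite, so every factorization below applies.
        var m = Matrix<Complex>.Build.DenseOfArray(new[,]
        {
            { new Complex(4, 0),  new Complex(1, 1) },
            { new Complex(1, -1), new Complex(3, 0) }
        });
        var b = Vector<Complex>.Build.DenseOfArray(new[] { new Complex(1, 0), new Complex(2, 0) });

        var chol = m.Cholesky();              // A = L * L'
        Console.WriteLine(chol.Determinant);
        Console.WriteLine(chol.Solve(b));     // x with m * x = b

        var lu = m.LU();                      // P * A = L * U
        Console.WriteLine(lu.Determinant);

        var qr = m.QR();                      // Householder QR
        Console.WriteLine(qr.IsFullRank);

        var svd = m.Svd();                    // M = U * S * V', singular values descending
        Console.WriteLine(svd.Rank);
        Console.WriteLine(svd.ConditionNumber);

        var evd = m.Evd();                    // eigenvalues and eigenvectors
        Console.WriteLine(evd.EigenValues);
    }
}
```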
The complex GramSchmidt factorization builds a unitary Q at construction (a null matrix, fewer rows than columns, or a rank-deficient matrix throws), exposes an in-place Factorize routine that overwrites the input with Q and fills R, and solves AX = B and Ax = b from the cached factorization. The complex Evd caches the eigenvalue decomposition at construction (optionally told whether the matrix is symmetric, and throwing if the algorithm fails to converge); its internals are the EISPACK-derived routines HTRIDI (reduce a Hermitian matrix to a real symmetric tridiagonal matrix by unitary similarity transformations), tql2 (the symmetric tridiagonal QL algorithm), HTRIBK (recover the eigenvectors by undoing the tridiagonalization), orthes/ortran (nonsymmetric reduction to Hessenberg form) and hqr2 (Hessenberg to real Schur form), plus Solve methods for AX = B and Ax = b.

Dense factorization classes: DenseCholesky caches the Cholesky factorization at construction (a null, non-square or non-positive-definite matrix throws) and solves AX = B and Ax = b; DenseLU does the same for the LU factorization and can return the LU-based inverse; DenseQR performs the Householder QR factorization for a chosen QR type, keeps a Tau vector with additional information on Q for the native solver, and solves both system forms; DenseSvd computes the singular value decomposition at construction, optionally with the U and VT vectors, and solves both system forms; a user-storage GramSchmidt mirrors the dense one; and UserCholesky caches the Cholesky factorization for user matrices with the same argument checks.
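Because the documentation stresses that every factorization is computed in the constructor and cached, one decomposition can serve several right-hand sides. A small illustrative sketch (not from this repository):

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class CachedFactorizationDemo
{
    static void Main()
    {
        var a = Matrix<Complex>.Build.DenseOfArray(new[,]
        {
            { new Complex(5, 0),  new Complex(2, 1) },
            { new Complex(2, -1), new Complex(4, 0) }
        });

        // The expensive decomposition happens once, inside QR(); each Solve
        // afterwards only performs the cheap triangular substitution.
        var qr = a.QR();

        var b1 = Vector<Complex>.Build.Dense(2, i => new Complex(i + 1, 0));
        var b2 = Matrix<Complex>.Build.DenseIdentity(2);   // several right-hand sides at once

        Console.WriteLine(qr.Solve(b1));   // x with a * x = b1
        Console.WriteLine(qr.Solve(b2));   // X with a * X = I, i.e. the inverse of a
    }
}
```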
User-storage factorization classes: UserCholesky adds an internal routine that performs a single Cholesky step over a column range with a given number of processors, and solves AX = B and Ax = b. UserEvd mirrors the complex eigenvalue decomposition above, caching the decomposition at construction and implementing the EISPACK-derived HTRIDI, tql2, HTRIBK, orthes/ortran and hqr2 steps before exposing the same Solve methods. UserLU caches the LU factorization (a null or non-square matrix throws), solves both system forms and returns the LU-based inverse. UserQR performs the Householder QR factorization with helpers that generate a column work vector from the initial matrix and compute Q or R over a row/column range on a given number of CPUs. UserSvd computes the singular value decomposition at construction, optionally with the U and VT vectors, and is built on BLAS/LAPACK-style helpers: a signum-scaled absolute value, column interchange, column and vector scaling, the DROTG-equivalent Givens rotation parameter computation, column and vector 2-norms, a conjugated column dot product and the plane rotation x(i) = c*x(i) + s*y(i), y(i) = c*y(i) - s*x(i); it exposes the same Solve methods.
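The DROTG-style helper described above returns the parameters c and s of a Givens rotation that zeroes the second coordinate of a point. As a plain illustration of that formula only (not the library's internal routine, and without the overflow safeguards a production DROTG applies):

```csharp
using System;

static class GivensRotationSketch
{
    // Given (a, b), return c and s with [c s; -s c] * [a; b] = [r; 0].
    static void Rotg(double a, double b, out double c, out double s, out double r)
    {
        r = Math.Sqrt(a * a + b * b);
        if (r == 0.0) { c = 1.0; s = 0.0; return; }   // nothing to rotate
        c = a / r;
        s = b / r;
    }

    static void Main()
    {
        double c, s, r;
        Rotg(3.0, 4.0, out c, out s, out r);
        Console.WriteLine("c={0}, s={1}, r={2}", c, s, r);          // 0.6, 0.8, 5
        Console.WriteLine("rotated y = {0}", -s * 3.0 + c * 4.0);   // 0: the y-coordinate is zeroed
    }
}
```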
The abstract complex Matrix base class gathers the shared surface: CoerceZero (zero out every value whose absolute value falls below a threshold), the conjugate transpose and its into-result variant, conjugation and negation, scalar and matrix addition and subtraction, scalar, vector and matrix multiplication including the transpose and conjugate-transpose variants on either operand, scalar division in both directions, pointwise multiply, divide and power, pointwise canonical modulus (sign of the divisor) and remainder (sign of the dividend) against another matrix or a scalar on either side, pointwise exponential and natural logarithm, the Moore-Penrose pseudo-inverse, the trace, the induced L1 and infinity norms and the Frobenius norm, row and column p-norms and normalization to a unit p-norm, row and column value and absolute-value sums, and the IsHermitian check.
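The pseudo-inverse and the row/column norm helpers summarized above are most useful for rectangular matrices; an illustrative sketch:

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class RectangularMatrixDemo
{
    static void Main()
    {
        // A rectangular 2x3 complex matrix has no ordinary inverse,
        // but the Moore-Penrose pseudo-inverse is defined.
        var a = Matrix<Complex>.Build.Dense(2, 3, (i, j) => new Complex(i + 1, j));

        var pinv = a.PseudoInverse();                                 // 3x2
        Console.WriteLine(a * pinv);                                  // approximately the 2x2 identity

        Console.WriteLine(a.RowNorms(2.0));                           // Euclidean norm of each row
        Console.WriteLine(a.ColumnSums());                            // sum of each column
        Console.WriteLine(a.NormalizeColumns(2.0).ColumnNorms(2.0));  // all ones
    }
}
```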
SparseMatrix (complex): sparse storage in 3-array compressed-sparse-row (CSR) format, intended for very large matrices where most cells are zero, with a NonZerosCount property. Construction mirrors the dense type: a constructor that binds directly to an initialized storage instance, plus factories for an independent square or rectangular zero matrix of a given size and for copies of another matrix, a two-dimensional array, an indexed enumerable, a row-major enumerable, a column-major array, enumerables/arrays/vectors of columns or rows, a diagonal given as a vector or array, a constant or init-function fill (of the whole matrix or of the diagonal only), and the identity matrix. Lower and upper triangle extraction methods return a new matrix or write into a result matrix of matching dimensions.
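A sketch of the sparse matrix usage implied by the factory list above; the cast to the concrete SparseMatrix type is only needed to read NonZerosCount, which lives on the concrete class. The values are arbitrary.

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Complex;

static class SparseMatrixDemo
{
    static void Main()
    {
        // A 1000x1000 matrix with a handful of non-zero entries;
        // the CSR storage only keeps those entries.
        var s = Matrix<Complex>.Build.Sparse(1000, 1000);
        s[0, 0] = new Complex(2, 0);
        s[10, 500] = new Complex(0, -1);
        s[999, 999] = new Complex(5, 5);

        Console.WriteLine(((SparseMatrix)s).NonZerosCount);   // 3
        Console.WriteLine(s.FrobeniusNorm());

        // Arithmetic on sparse operands generally stays sparse.
        var t = s + s.ConjugateTranspose();
        Console.WriteLine(t.FrobeniusNorm());
    }
}
```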
The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. 
+ The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Complex version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. 
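+
+ A sketch of a few of the creation and triangle members described above, shown with the double-precision SparseMatrix for brevity (the static factory names are assumptions based on the Math.NET Numerics 3.x API; the complex variant documented here has the same shape):
+
+     using System;
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Double;
+
+     // Build a 3x3 sparse matrix from (row, column, value) triples.
+     var a = SparseMatrix.OfIndexed(3, 3, new[]
+     {
+         Tuple.Create(0, 0, 4.0),
+         Tuple.Create(1, 1, 5.0),
+         Tuple.Create(2, 0, 1.0),
+         Tuple.Create(2, 2, 6.0)
+     });
+
+     // Identity and diagonal helpers described above.
+     var identity = SparseMatrix.CreateIdentity(3);
+     var diagonal = SparseMatrix.OfDiagonalArray(new[] { 1.0, 2.0, 3.0 });
+
+     // Triangle extraction and norms.
+     Matrix<double> lower = a.LowerTriangle();
+     Matrix<double> strictUpper = a.StrictlyUpperTriangle();
+     double frobenius = a.FrobeniusNorm();
+
+     // The operators allocate a new matrix for the result.
+     var sum = a + identity;
+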
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. 
+ The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . 
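+
+ A sketch of the sparse-vector members above, again using the double-precision SparseVector for brevity:
+
+     using System;
+     using MathNet.Numerics.LinearAlgebra.Double;
+
+     // Length-10 sparse vector with two non-zero entries; omitted indices stay zero.
+     var v = SparseVector.OfIndexedEnumerable(10, new[]
+     {
+         Tuple.Create(2, 3.0),
+         Tuple.Create(7, -1.5)
+     });
+
+     Console.WriteLine(v.NonZerosCount);          // 2
+     Console.WriteLine(v.L1Norm());               // 4.5
+     Console.WriteLine(v.InfinityNorm());         // 3
+     Console.WriteLine(v.AbsoluteMaximumIndex()); // 2
+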
+ + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). + Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). 
+ True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
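+
+ The example referred to above is not reproduced here; as an indication only, a minimal sketch of calling the solver, assuming the double-precision BiCgStab, Iterator and stop-criterion types of Math.NET Numerics 3.x (the complex variant documented here is used the same way):
+
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     // A small non-symmetric system A*x = b.
+     var a = SparseMatrix.OfArray(new double[,]
+     {
+         { 4, 1, 0 },
+         { 2, 5, 1 },
+         { 0, 1, 3 }
+     });
+     var b = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });
+     var x = DenseVector.Create(3, 0.0);
+
+     // Stop after 1000 iterations or once the residual is small enough.
+     var iterator = new Iterator<double>(
+         new IterationCountStopCriterion<double>(1000),
+         new ResidualStopCriterion<double>(1e-10));
+
+     // As noted above, the choice of preconditioner largely determines convergence.
+     var solver = new BiCgStab();
+     solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
+
+     var status = iterator.Status; // e.g. IterationStatus.Converged
+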
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
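+
+ The example referred to above is likewise not reproduced here; as a sketch, usage mirrors the BiCGStab snippet earlier, assuming the 3.x class name GpBiCg:
+
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     var a = SparseMatrix.OfArray(new double[,] { { 5, 2, 0 }, { 1, 4, 1 }, { 0, 2, 6 } });
+     var b = DenseVector.OfArray(new[] { 1.0, 0.0, 1.0 });
+     var x = DenseVector.Create(3, 0.0);
+
+     // The properties documented below control how many steps of each method are
+     // taken before switching; the defaults are normally left alone.
+     var solver = new GpBiCg();
+     solver.Solve(a, b, x,
+         new Iterator<double>(new ResidualStopCriterion<double>(1e-10),
+                              new IterationCountStopCriterion<double>(1000)),
+         new DiagonalPreconditioner());
+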
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
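+
+ As a sketch only, assuming the 3.x class name MlkBiCgStab and a NumberOfStartingVectors property corresponding to the "number of starting vectors" documented below:
+
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     var a = SparseMatrix.OfArray(new double[,] { { 6, 1, 0 }, { 2, 5, 1 }, { 0, 1, 4 } });
+     var b = DenseVector.OfArray(new[] { 1.0, 2.0, 0.0 });
+     var x = DenseVector.Create(3, 0.0);
+
+     // Must be larger than 1 and smaller than the number of variables (3 here),
+     // per the property description below. The property name is an assumption.
+     var solver = new MlkBiCgStab { NumberOfStartingVectors = 2 };
+
+     solver.Solve(a, b, x,
+         new Iterator<double>(new ResidualStopCriterion<double>(1e-10),
+                              new IterationCountStopCriterion<double>(1000)),
+         new DiagonalPreconditioner());
+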
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
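+
+ As a sketch only, assuming the 3.x class name TFQMR; the call pattern is the same as for the other iterative solvers above:
+
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     var a = SparseMatrix.OfArray(new double[,] { { 3, 1, 0 }, { 1, 4, 2 }, { 0, 2, 5 } });
+     var b = DenseVector.OfArray(new[] { 4.0, 7.0, 7.0 });
+     var x = DenseVector.Create(3, 0.0);
+
+     new TFQMR().Solve(a, b, x,
+         new Iterator<double>(new ResidualStopCriterion<double>(1e-10),
+                              new IterationCountStopCriterion<double>(1000)),
+         new DiagonalPreconditioner());
+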
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
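+
+ A sketch of how the settings described below might be supplied (the class name ILUTPPreconditioner and the constructor parameter order are assumptions based on the 3.x API):
+
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+     // fill level, drop tolerance, pivot tolerance (see the property descriptions below).
+     var ilutp = new ILUTPPreconditioner(10.0, 1e-4, 0.5);
+
+     // A sparse matrix is recommended here, as noted below.
+     var a = SparseMatrix.OfArray(new double[,] { { 4, 1, 0 }, { 1, 5, 2 }, { 0, 2, 6 } });
+     ilutp.Initialize(a);
+
+     // Approximate the solution of A*x = b for a given right-hand side.
+     var rhs = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });
+     var lhs = DenseVector.Create(3, 0.0);
+     ilutp.Approximate(rhs, lhs);
+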
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
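+
+ In practice such a preconditioner is usually just passed to one of the iterative solvers, which typically initializes it with the coefficient matrix; a sketch assuming the 3.x class name ILU0Preconditioner:
+
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     var a = SparseMatrix.OfArray(new double[,] { { 4, 1, 0 }, { 1, 5, 2 }, { 0, 2, 6 } });
+     var b = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });
+     var x = DenseVector.Create(3, 0.0);
+
+     new BiCgStab().Solve(a, b, x,
+         new Iterator<double>(new ResidualStopCriterion<double>(1e-10)),
+         new ILU0Preconditioner());
+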
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. 
+ Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). 
The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. 
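+
+ In practice these factorizations are usually obtained from the corresponding methods on the matrix itself rather than from the constructors; a sketch for Cholesky with the double-precision API:
+
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Double;
+
+     // A symmetric, positive definite matrix.
+     var a = DenseMatrix.OfArray(new double[,]
+     {
+         { 4, 1, 1 },
+         { 1, 3, 0 },
+         { 1, 0, 2 }
+     });
+     var b = DenseVector.OfArray(new[] { 6.0, 4.0, 3.0 });
+
+     var cholesky = a.Cholesky();          // throws if A is not symmetric positive definite
+     Vector<double> x = cholesky.Solve(b); // solves A*x = b via L*L'
+     double det = cholesky.Determinant;
+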
+ + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. 
+ Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. 
+ Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. 
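+
+ Stepping back from the internal helper routines, the factorizations above are normally reached through the matrix-level methods as well; for example QR for least-squares solutions and SVD for rank and conditioning (a sketch, double-precision API):
+
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Double;
+
+     // Overdetermined system: QR gives the least-squares solution.
+     var a = DenseMatrix.OfArray(new double[,] { { 1, 1 }, { 1, 2 }, { 1, 3 } });
+     var b = DenseVector.OfArray(new[] { 1.0, 2.0, 2.0 });
+     Vector<double> leastSquares = a.QR().Solve(b);
+
+     // The SVD exposes rank, condition number and the singular values.
+     var svd = a.Svd();
+     int rank = svd.Rank;
+     double cond = svd.ConditionNumber;
+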
+ + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. 
+ + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. 
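A short sketch of the dense-matrix factory and norm members documented above, again using the Double variant (OfArray, L1Norm, InfinityNorm and FrobeniusNorm carry the meanings stated in the summaries):

    using MathNet.Numerics.LinearAlgebra.Double;

    var m = DenseMatrix.OfArray(new double[,]
    {
        { 1.0, -2.0 },
        { 3.0,  4.0 }
    });

    double l1   = m.L1Norm();        // maximum absolute column sum -> 6
    double linf = m.InfinityNorm();  // maximum absolute row sum    -> 7
    double fro  = m.FrobeniusNorm(); // sqrt of the sum of squares  -> sqrt(30)

    var y = m * DenseVector.OfArray(new[] { 1.0, 1.0 }); // matrix-vector product -> (-1, 7)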
+ + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. 
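The write behaviour described in the DiagonalMatrix remarks (off-diagonal assignments are rejected unless the value is 0.0 or NaN) can be illustrated with the sketch below; the exact exception type is not stated above, so the last line is only indicative:

    using MathNet.Numerics.LinearAlgebra.Double;

    var d = DiagonalMatrix.OfArray(new double[,]
    {
        { 2.0, 0.0 },
        { 0.0, 5.0 }
    });

    d[1, 1] = 7.0;    // allowed: the entry is on the diagonal
    d[0, 1] = 0.0;    // allowed: writing zero off the diagonal leaves the matrix unchanged
    // d[0, 1] = 1.0; // rejected: non-zero off-diagonal entries throw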
+ + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. 
+ + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
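For the determinant, diagonal and inverse members listed above, a minimal sketch using the Double variant (for a diagonal matrix the determinant is simply the product of the diagonal entries):

    using MathNet.Numerics.LinearAlgebra.Double;

    var d = DiagonalMatrix.OfArray(new double[,]
    {
        { 2.0, 0.0, 0.0 },
        { 0.0, 4.0, 0.0 },
        { 0.0, 0.0, 5.0 }
    });

    double det = d.Determinant(); // 2 * 4 * 5 = 40
    var diag   = d.Diagonal();    // vector (2, 4, 5)
    var inv    = d.Inverse();     // diagonal matrix with (0.5, 0.25, 0.2) on the diagonal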
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . 
+ If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. 
+ + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. 
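The Complex32 matrix members above (pointwise products, trace, conjugate transpose) combine as in this sketch; the literal Complex32 values are illustrative only:

    using MathNet.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    var a = Matrix<Complex32>.Build.DenseOfArray(new[,]
    {
        { new Complex32(1, 1), new Complex32(0, 2) },
        { new Complex32(3, 0), new Complex32(2, -1) }
    });

    var squared = a.PointwiseMultiply(a); // element-by-element product
    var tr      = a.Trace();              // (1+1i) + (2-1i) = 3
    var aH      = a.ConjugateTranspose(); // conjugate (Hermitian) transpose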
+ + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. 
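As a sketch of the compressed-sparse-row storage described above, assuming the SparseMatrix.OfIndexed factory overload (rows, columns, indexed values), only the explicitly listed entries consume memory:

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    var s = SparseMatrix.OfIndexed(1000, 1000, new[]
    {
        Tuple.Create(0, 0, 4.0),
        Tuple.Create(10, 250, -1.5),
        Tuple.Create(999, 999, 2.0)
    });

    int stored = s.NonZerosCount; // 3 non-zero elements out of a million cells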
+ + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
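The triangle extraction members above behave the same way on sparse and dense storage; a small sketch using the Double sparse type:

    using MathNet.Numerics.LinearAlgebra.Double;

    var m = SparseMatrix.OfArray(new double[,]
    {
        { 1.0, 2.0 },
        { 3.0, 4.0 }
    });

    var lower  = m.LowerTriangle();         // [[1, 0], [3, 4]]
    var upperS = m.StrictlyUpperTriangle(); // [[0, 2], [0, 0]]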
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. 
+ The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. 
+ The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. 
+ This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex32 value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex32 value. 
+ The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex32 value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex32 dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex32 dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. 
+ All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . 
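A vector-side sketch of the members documented above (dense and sparse construction, dot products and norms), using the Double variants and assuming the builder's SparseOfIndexed overload:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    var dense  = DenseVector.OfArray(new[] { 3.0, 0.0, 4.0 });
    var sparse = Vector<double>.Build.SparseOfIndexed(3, new[] { Tuple.Create(2, 1.0) });

    double dot  = dense.DotProduct(sparse); // 4.0, the sum of a[i]*b[i]
    double l2   = dense.L2Norm();           // 5.0, Euclidean norm
    double linf = dense.InfinityNorm();     // 4.0, maximum absolute value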
+ + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. 
+ + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. 
+ This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. 
+ Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
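All of the DenseOf*/SparseOf* factories documented in this block copy their input into freshly allocated storage, so the resulting matrix is independent of the source data. A hedged example of the most common ones (illustrative data, MathNet.Numerics 3.x assumed):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class CopyFactoriesExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;
            var V = Vector<double>.Build;

            // Dense copy of a 2D array (independent of the source array).
            Matrix<double> a = M.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });

            // Dense matrix assembled from column vectors.
            Matrix<double> b = M.DenseOfColumnVectors(
                V.Dense(new double[] { 1, 0 }),
                V.Dense(new double[] { 0, 1 }));

            // Sparse copy of an indexed enumerable; omitted (row, column) keys are treated as zero.
            Matrix<double> s = M.SparseOfIndexed(100, 100, new[]
            {
                Tuple.Create(0, 0, 1.0),
                Tuple.Create(42, 7, -2.5)
            });

            Console.WriteLine(a * b);
        }
    }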
+ + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. 
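For the diagonal factories documented here, a short sketch (MathNet.Numerics 3.x assumed; sizes and values are illustrative):

    using MathNet.Numerics.LinearAlgebra;

    class DiagonalExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;

            Matrix<double> identity = M.DiagonalIdentity(4);                 // one-diagonal
            Matrix<double> scaled   = M.Diagonal(4, 4, 2.5);                 // same value on every diagonal entry
            Matrix<double> indexed  = M.Diagonal(4, 4, i => i + 1.0);        // init function per diagonal index
            Matrix<double> fromArr  = M.DiagonalOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });

            System.Console.WriteLine(scaled * identity);
        }
    }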
+ + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + Supported data types are double, single, , and . + + + + Computes the Cholesky decomposition for a matrix. + + The Cholesky decomposition object. + + + + Computes the LU decomposition for a matrix. + + The LU decomposition object. + + + + Computes the QR decomposition for a matrix. + + The type of QR factorization to perform. + The QR decomposition object. + + + + Computes the QR decomposition for a matrix using Modified Gram-Schmidt Orthogonalization. + + The QR decomposition object. + + + + Computes the SVD decomposition for a matrix. + + Compute the singular U and VT vectors or not. + The SVD decomposition object. + + + + Computes the EVD decomposition for a matrix. + + The EVD decomposition object. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. 
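The factorization and Solve members documented above are typically used as below. This is a sketch only; the solver, stop criteria and the 2x2 system are illustrative choices, not prescribed by the library or by this project:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class SolveExample
    {
        static void Main()
        {
            Matrix<double> A = Matrix<double>.Build.DenseOfArray(
                new double[,] { { 4, 1 }, { 1, 3 } });
            Vector<double> b = Vector<double>.Build.Dense(new double[] { 1, 2 });

            // Direct route: factorize once, then solve Ax = b.
            Vector<double> x1 = A.QR().Solve(b);
            Vector<double> x2 = A.Cholesky().Solve(b);      // A is symmetric positive definite here

            // Iterative route: solver plus stop criteria (optionally also a preconditioner).
            var monitor = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));
            Vector<double> x3 = A.SolveIterative(b, new BiCgStab(), monitor);

            System.Console.WriteLine(x1);
            System.Console.WriteLine(x3);
        }
    }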
+ + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The result matrix X. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. 
+ + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Creates a new object that is a copy of the current instance. + + + A new object that is a copy of this instance. + + + + + Returns a string that describes the type, dimensions and shape of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes this matrix. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to add. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to add. + The right matrix to add. + The result of the addition. + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Subtracts a scalar from each element of a matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to subtract. + The scalar value to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Substracts each element of a matrix from a scalar. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. 
+ The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Divides a scalar with a matrix. + + The scalar to divide. + The matrix. + The result of the division. + If is . + + + + Divides a matrix with a scalar. + + The matrix to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of the matrix of the given divisor. + + The matrix whose elements we want to compute the modulus of. + The divisor to use. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the matrix. + + The dividend we want to compute the modulus of. + The matrix whose elements we want to use as divisor. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two matrices. + + The matrix whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . + + + + Computes the sqrt of a matrix pointwise + + The input matrix + + + + + Computes the exponential of a matrix pointwise + + The input matrix + + + + + Computes the log of a matrix pointwise + + The input matrix + + + + + Computes the log10 of a matrix pointwise + + The input matrix + + + + + Computes the sin of a matrix pointwise + + The input matrix + + + + + Computes the cos of a matrix pointwise + + The input matrix + + + + + Computes the tan of a matrix pointwise + + The input matrix + + + + + Computes the asin of a matrix pointwise + + The input matrix + + + + + Computes the acos of a matrix pointwise + + The input matrix + + + + + Computes the atan of a matrix pointwise + + The input matrix + + + + + Computes the sinh of a matrix pointwise + + The input matrix + + + + + Computes the cosh of a matrix pointwise + + The input matrix + + + + + Computes the tanh of a matrix pointwise + + The input matrix + + + + + Computes the absolute value of a matrix pointwise + + The input matrix + + + + + Computes the floor of a matrix pointwise + + The input matrix + + + + + Computes the ceiling of a matrix pointwise + + The input matrix + + + + + Computes the rounded value of a matrix pointwise + + The input matrix + + + + + The value of 1.0. + + + + + The value of 0.0. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. 
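The overloaded operators and the pointwise sqrt/exp/log/trig functions documented through this block all allocate a new result. A hedged sketch of the corresponding C# surface (MathNet.Numerics 3.x assumed; the 3x3 random inputs are illustrative):

    using MathNet.Numerics.LinearAlgebra;

    class OperatorExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;
            Matrix<double> a = M.Random(3, 3);
            Matrix<double> b = M.Random(3, 3);
            Vector<double> v = Vector<double>.Build.Random(3);

            Matrix<double> sum     = a + b;        // new matrix, picks the denser operand's representation
            Matrix<double> shifted = a + 2.0;      // scalar added to every element
            Matrix<double> product = a * b;        // matrix product
            Vector<double> image   = a * v;        // matrix times vector
            Matrix<double> halved  = a / 2.0;
            Matrix<double> rem     = a % 3.0;      // pointwise remainder, sign of the dividend

            // Pointwise unary functions likewise return new matrices.
            Matrix<double> roots = a.PointwiseAbs().PointwiseSqrt();
            Matrix<double> expA  = a.PointwiseExp();

            System.Console.WriteLine(sum);
            System.Console.WriteLine(roots);
        }
    }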
+ + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar denominator to use. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar numerator to use. + The matrix to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. 
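Most members in the block above take an explicit result matrix or vector in addition to their operands, which lets callers reuse preallocated storage instead of allocating on every call, for example inside a per-iteration loop. A minimal sketch of that pattern (illustrative sizes):

    using MathNet.Numerics.LinearAlgebra;

    class ResultBufferExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;
            Matrix<double> a = M.Random(3, 3);
            Matrix<double> b = M.Random(3, 3);
            Vector<double> x = Vector<double>.Build.Random(3);

            // Preallocate once, then write every result into the same buffers.
            Matrix<double> product = M.Dense(3, 3);
            Matrix<double> shifted = M.Dense(3, 3);
            Vector<double> image   = Vector<double>.Build.Dense(3);

            for (int i = 0; i < 10; i++)
            {
                a.Multiply(b, product);   // this * other  -> product, no new allocation
                a.Multiply(x, image);     // this * vector -> image
                a.Add(1.0, shifted);      // a + 1.0       -> shifted
            }

            System.Console.WriteLine(product);
        }
    }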
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent matrix and store the result into the result matrix. + + The exponent matrix to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Adds a scalar to each element of the matrix. + + The scalar to add. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds a scalar to each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix. + + The scalar to subtract. + A new matrix containing the subtraction of this matrix and the scalar. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts each element of the matrix from a scalar. + + The scalar to subtract from. + A new matrix containing the subtraction of the scalar and this matrix. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The result of the subtraction. + If the two matrices don't have the same dimensions. 
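The Add/Subtract/Multiply/Divide method forms documented here mirror the operators; they are convenient from languages without operator overloading and they chain naturally. A short sketch with illustrative values:

    using MathNet.Numerics.LinearAlgebra;

    class MethodFormExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;
            Matrix<double> a = M.Random(2, 2);
            Matrix<double> b = M.Random(2, 2);

            Matrix<double> c = a.Add(2.0)       // a + 2
                                .Subtract(b)    // ... - b
                                .Multiply(0.5)  // ... * 0.5
                                .Divide(4.0);   // ... / 4
            System.Console.WriteLine(c);
        }
    }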
+ + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of this matrix with a scalar. + + The scalar to multiply with. + The result of the multiplication. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides each element of this matrix with a scalar. + + The scalar to divide with. + The result of the division. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides a scalar by each element of the matrix. + + The scalar to divide. + The result of the division. + + + + Divides a scalar by each element of the matrix and places results into the result matrix. + + The scalar to divide. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.ColumnCount != rightSide.Count. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.RowCount. + If this.ColumnCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ). + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.Rows. + If the result matrix's dimensions are not the this.Rows x other.Columns. + + + + Multiplies this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.Rows. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. 
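The TransposeAndMultiply / TransposeThisAndMultiply / LeftMultiply members documented around here compute products against a transpose without materializing the transposed matrix first. A hedged sketch of the naming convention (sizes are illustrative):

    using MathNet.Numerics.LinearAlgebra;

    class TransposeMultiplyExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;
            Matrix<double> a = M.Random(3, 2);
            Matrix<double> b = M.Random(3, 2);
            Vector<double> v = Vector<double>.Build.Random(3);

            Matrix<double> atb = a.TransposeThisAndMultiply(b); // A^T * B, 2 x 2
            Matrix<double> abt = a.TransposeAndMultiply(b);     // A * B^T, 3 x 3
            Vector<double> atv = a.TransposeThisAndMultiply(v); // A^T * v, length 2
            Vector<double> va  = a.LeftMultiply(v);             // v * A,   length 2

            System.Console.WriteLine(atb);
            System.Console.WriteLine(abt);
        }
    }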
+ + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with the conjugate transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the conjugate transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the conjugate transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Raises this square matrix to a positive integer exponent and places the results into the result matrix. + + The positive integer exponent to raise the matrix to. + The result of the power. + + + + Multiplies this square matrix with another matrix and returns the result. + + The positive integer exponent to raise the matrix to. + + + + Negate each element of this matrix. + + A matrix containing the negated values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. 
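Power, Negate and the scalar Modulus/Remainder pair documented in this stretch behave as sketched below; note the documented distinction that Modulus takes the sign of the divisor while Remainder (the % semantics) takes the sign of the dividend. MathNet.Numerics 3.x assumed, values illustrative:

    using MathNet.Numerics.LinearAlgebra;

    class PowerAndModExample
    {
        static void Main()
        {
            Matrix<double> a = Matrix<double>.Build.DenseOfArray(
                new double[,] { { 0, 1 }, { -2, 3 } });

            Matrix<double> cubed = a.Power(3);      // square matrix raised to a positive integer exponent
            Matrix<double> neg   = a.Negate();      // same result as -a

            Matrix<double> mod = a.Modulus(3.0);    // canonical modulus: -2 -> 1  (sign of the divisor)
            Matrix<double> rem = a.Remainder(3.0);  // remainder:         -2 -> -2 (sign of the dividend)

            System.Console.WriteLine(cubed);
            System.Console.WriteLine(mod);
            System.Console.WriteLine(rem);
        }
    }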
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. 
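The pointwise binary operations documented here act element by element and require both operands to have the same dimensions. Sketch with illustrative inputs:

    using MathNet.Numerics.LinearAlgebra;

    class PointwiseBinaryExample
    {
        static void Main()
        {
            var M = Matrix<double>.Build;
            Matrix<double> a = M.Dense(2, 2, (i, j) => i + j + 1.0);
            Matrix<double> b = M.Dense(2, 2, 2.0);

            Matrix<double> hadamard = a.PointwiseMultiply(b);   // element-by-element product
            Matrix<double> ratio    = a.PointwiseDivide(b);
            Matrix<double> squared  = a.PointwisePower(2.0);

            // The result-parameter overloads reuse existing storage instead of allocating.
            Matrix<double> buffer = M.Dense(2, 2);
            a.PointwiseMultiply(b, buffer);

            System.Console.WriteLine(hadamard);
            System.Console.WriteLine(buffer);
        }
    }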
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
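The unary/binary helper functions described above back the public pointwise surface; from user code the equivalent is either a dedicated method such as PointwiseExp/PointwiseLog or the general Map with a lambda. A hedged sketch (MathNet.Numerics 3.x assumed):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class MapExample
    {
        static void Main()
        {
            Matrix<double> a = Matrix<double>.Build.Dense(2, 2, (i, j) => i + 2.0 * j);

            // Dedicated pointwise methods for the common cases.
            Matrix<double> e = a.PointwiseExp();
            Matrix<double> l = e.PointwiseLog();            // recovers a, up to rounding

            // General case: apply an arbitrary function to every element.
            Matrix<double> clipped = a.Map(x => Math.Min(x, 2.0));

            Console.WriteLine(l);
            Console.WriteLine(clipped);
        }
    }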
+ + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Calculates the rank of the matrix. + + effective numerical rank, obtained from SVD + + + + Calculates the nullity of the matrix. + + effective numerical nullity, obtained from SVD + + + Calculates the condition number of this matrix. + The condition number of the matrix. + The condition number is calculated using singular value decomposition. + + + Computes the determinant of this matrix. + The determinant of this matrix. + + + + Computes an orthonormal basis for the null space of this matrix, + also known as the kernel of the corresponding matrix transformation. 
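The scalar characteristics documented at the end of this block (trace, rank, nullity, condition number, determinant, kernel) are commonly inspected together; rank, nullity and the condition number are obtained via an SVD internally. Sketch with a deliberately rank-deficient matrix:

    using MathNet.Numerics.LinearAlgebra;

    class InspectionExample
    {
        static void Main()
        {
            Matrix<double> a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 2, 3 },
                { 2, 4, 6 },   // = 2 * first row, so the matrix is rank deficient
                { 1, 0, 1 }
            });

            double trace   = a.Trace();             // 6: sum of the diagonal (square matrices only)
            int    rank    = a.Rank();              // 2
            int    nullity = a.Nullity();           // 1 = columns - rank
            double det     = a.Determinant();       // 0 (up to rounding) for a singular matrix
            double cond    = a.ConditionNumber();   // very large for a singular matrix

            Vector<double>[] kernel = a.Kernel();   // orthonormal basis of the null space
            System.Console.WriteLine("rank=" + rank + " nullity=" + nullity + " kernel vectors=" + kernel.Length);
        }
    }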
+ + + + + Computes an orthonormal basis for the column space of this matrix, + also known as the range or image of the corresponding matrix transformation. + + + + Computes the inverse of this matrix. + The inverse of this matrix. + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + If the result matrix's dimensions are not (this.Rows * lower.rows) x (this.Columns * lower.Columns). + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + Calculates the induced L1 norm of this matrix. 
+ The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + For sparse matrices, the L2 norm is computed using a dense implementation of singular value decomposition. + In a later release, it will be replaced with a sparse implementation. + + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Initializes a new instance of the Matrix class. + + + + + Gets the raw matrix data storage. + + + + + Gets the number of columns. + + The number of columns. + + + + Gets the number of rows. + + The number of rows. + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + + + + Sets the value of the given element without range checking. + + + The row of the element. + + + The column of the element. + + + The value to set the element to. + + + + + Sets all values to zero. + + + + + Sets all values of a row to zero. + + + + + Sets all values of a column to zero. + + + + + Sets all values for all of the chosen rows to zero. + + + + + Sets all values for all of the chosen columns to zero. + + + + + Sets all values of a sub-matrix to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Creates a clone of this instance. + + + A clone of the instance. + + + + + Copies the elements of this matrix to the given matrix. + + + The matrix to copy values into. + + + If target is . + + + If this and the target matrix do not have the same dimensions.. + + + + + Copies a row into an Vector. + + The row to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of rows. + + + + Copies a row into to the given Vector. + + The row to copy. + The Vector to copy the row into. + If the result vector is . + If is negative, + or greater than or equal to the number of rows. + If this.Columns != result.Count. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. 
+ The column to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of rows. + is negative, + or greater than or equal to the number of columns. + (columnIndex + length) >= Columns. + If is not positive. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Copies a column into a new Vector>. + + The column to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of columns. + + + + Copies a column into to the given Vector. + + The column to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If this.Rows != result.Count. + + + + Copies the requested column elements into a new Vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of columns. + is negative, + or greater than or equal to the number of rows. + (rowIndex + length) >= Rows. + + If is not positive. + + + + Copies the requested column elements into the given vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Returns the elements of the diagonal in a Vector. + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Returns a new matrix containing the lower triangle of this matrix. 
The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a new matrix and inserts the given column at the given index. + + The index of where to insert the column. + The column to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of columns. + If the size of != the number of rows. + + + + Creates a new matrix with the given column removed. + + The index of the column to remove. + A new matrix without the chosen column. + If is < zero or >= the number of columns. + + + + Copies the values of the given Vector to the specified column. + + The column to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given Vector to the specified sub-column. + + The column to copy the values to. + The row to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given array to the specified column. + + The column to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + If the size of does not + equal the number of rows of this Matrix. + + + + Creates a new matrix and inserts the given row at the given index. + + The index of where to insert the row. + The row to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of rows. + If the size of != the number of columns. + + + + Creates a new matrix with the given row removed. + + The index of the row to remove. + A new matrix without the chosen row. + If is < zero or >= the number of rows. + + + + Copies the values of the given Vector to the specified row. + + The row to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given Vector to the specified sub-row. + + The row to copy the values to. + The column to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given array to the specified row. + + The row to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. 
+ If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The column to start copying to. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The number of rows to copy. Must be positive. + The column to start copying to. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The row of the sub-matrix to start copying from. + The number of rows to copy. Must be positive. + The column to start copying to. + The column of the sub-matrix to start copying from. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of the given Vector to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Returns the transpose of this matrix. + + The transpose of this matrix. + + + + Puts the transpose of this matrix into the result matrix. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + + + + Concatenates this matrix with the given matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Concatenates this matrix with the given matrix and places the result into the result matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. 
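The entries above describe the concatenation members of the matrix type (Append for side-by-side concatenation, Stack for vertical stacking) together with Transpose. A minimal usage sketch follows, assuming these entries document the MathNet.Numerics Matrix<double> API that this documentation file appears to belong to; the class and variable names are illustrative only.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class ConcatenationSketch
    {
        static void Main()
        {
            // Two 2x2 blocks built from explicit arrays.
            var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
            var b = Matrix<double>.Build.DenseOfArray(new double[,] { { 5, 6 }, { 7, 8 } });

            var wide = a.Append(b);          // 2x4: columns of b appended to the right of a
            var tall = a.Stack(b);           // 4x2: b stacked below a
            var block = a.DiagonalStack(b);  // 4x4: a and b on the diagonal, zeros elsewhere

            Console.WriteLine(wide.ToString());
            Console.WriteLine(tall.ToString());
            Console.WriteLine(block.ToString());
            Console.WriteLine(a.Transpose().ToString());
        }
    }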
+ + + + + + Diagonally stacks this matrix on top of the given matrix. The new matrix is an M-by-N matrix, + where M = this.Rows + lower.Rows and N = this.Columns + lower.Columns. + The values of the off-diagonal blocks are set to zero. + + The lower, right matrix. + If lower is . + The combined matrix. + + + + + + Diagonally stacks this matrix on top of the given matrix and places the combined matrix into the result matrix. + + The lower, right matrix. + The combined matrix. + If lower is . + If the result matrix is . + If the result matrix's dimensions are not (this.Rows + lower.Rows) x (this.Columns + lower.Columns). + + + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is Hermitian (conjugate symmetric). + + + + + Evaluates whether this matrix is conjugate symmetric. + + + + + Returns this matrix as a multidimensional array. + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + + A multidimensional array containing the values of this matrix. + + + + Returns the matrix's elements as an array with the data laid out column by column (column major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the matrix's elements as an array with the data laid out row by row (row major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
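The two entries above describe the column-major and row-major copies of the matrix data. A short sketch reproducing the 3-by-3 example shown in the documentation, assuming the MathNet.Numerics Matrix<double> builder API; names and output formatting are illustrative.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class LayoutSketch
    {
        static void Main()
        {
            var m = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 2, 3 },
                { 4, 5, 6 },
                { 7, 8, 9 }
            });

            // Independent copies of the data in the two layouts described above.
            double[] byColumn = m.ToColumnMajorArray(); // 1, 4, 7, 2, 5, 8, 3, 6, 9
            double[] byRow = m.ToRowMajorArray();       // 1, 2, 3, 4, 5, 6, 7, 8, 9

            Console.WriteLine(string.Join(", ", byColumn));
            Console.WriteLine(string.Join(", ", byRow));
        }
    }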
+ + + Returns this matrix as an array of row arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns this matrix as an array of column arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns the internal multidimensional array of this matrix if, and only if, this matrix is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the matrix will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Returns the internal column by column (column major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row by row (row major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
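The As*MajorArray members above expose the internal storage rather than a copy, and return null when the requested layout does not match how the matrix is stored. A hedged sketch of the difference, assuming dense matrices use column-major storage as the entries above imply; the write-through behaviour shown is an expectation drawn from those descriptions, not a verified guarantee.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class SharedStorageSketch
    {
        static void Main()
        {
            var m = Matrix<double>.Build.Dense(2, 2, (i, j) => i * 2 + j);

            // To*Array always copies; As*Array only succeeds when the requested
            // layout matches the internal storage, otherwise it returns null.
            double[] copy = m.ToColumnMajorArray();
            double[] shared = m.AsColumnMajorArray();   // assumed: dense storage is column major

            if (shared != null)
            {
                shared[0] = 99.0;                       // writes through to the matrix
            }

            Console.WriteLine(m[0, 0]);                 // 99 if storage was shared
            Console.WriteLine(copy[0]);                 // unchanged: independent copy
        }
    }

When in doubt about the storage format, the To*Array copies are the safer choice, as the entries themselves recommend.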
+ + + Returns the internal row arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowArrays instead if you always need an independent array. + + + + + Returns the internal column arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnArrays instead if you always need an independent array. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix. + + The column to start enumerating over. + The number of columns to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix and their index. + + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix and their index. + + The column to start enumerating over. + The number of columns to enumerating over. + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix. + + The row to start enumerating over. + The number of rows to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix and their index. 
+ + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix and their index. + + The row to start enumerating over. + The number of rows to enumerating over. + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Applies a function to each value of this matrix and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value with its result. + The row and column indices of each value (zero-based) are passed as first arguments to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + For each row, applies a function f to each element of the row, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each row. + + + + + For each column, applies a function f to each element of the column, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each column. + + + + + Applies a function f to each row vector, threading an accumulator vector argument through the computation. 
+ Returns the resulting accumulator vector. + + + + + Applies a function f to each column vector, threading an accumulator vector argument through the computation. + Returns the resulting accumulator vector. + + + + + Reduces all row vectors by applying a function between two of them, until only a single vector is left. + + + + + Reduces all column vectors by applying a function between two of them, until only a single vector is left. + + + + + Applies a function to each value pair of two matrices and replaces the value in the result vector. + + + + + Applies a function to each value pair of two matrices and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two matrices and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two matrices of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element pairs of two matrices of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two matrices of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Defines the generic class for Vector classes. + + Supported data types are double, single, , and . + + + + The zero value for type T. + + + + + The value of 1.0 for type T. + + + + + Negates vector and save result to + + Target vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. 
+ The sum of conj(a[i])*b[i] for all i. + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar denominator to use. + The vector to store the result of the division. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar numerator to use. + The vector to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Adds a scalar to each element of the vector. + + The scalar to add. + A copy of the vector with the scalar added. + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + If this vector and are not the same size. + + + + Adds another vector to this vector. + + The vector to add to this one. + A new vector containing the sum of both vectors. 
+ If this vector and are not the same size. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Subtracts a scalar from each element of the vector. + + The scalar to subtract. + A new vector containing the subtraction of this vector and the scalar. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Subtracts each element of the vector from a scalar. + + The scalar to subtract from. + A new vector containing the subtraction of the scalar and this vector. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Returns a negated vector. + + The negated vector. + Added as an alternative to the unary negation operator. + + + + Negates vector and save result to + + Target vector + + + + Subtracts another vector from this vector. + + The vector to subtract from this one. + A new vector containing the subtraction of the the two vectors. + If this vector and are not the same size. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Return vector with complex conjugate values of the source vector + + Conjugated vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector. + + The scalar to multiply. + A new vector that is the multiplication of the vector and the scalar. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + If this vector and are not the same size. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + If is not of the same size. + + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + If is not of the same size. + If is . + + + + + Divides each element of the vector by a scalar. + + The scalar to divide with. + A new vector that is the division of the vector and the scalar. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar to divide with. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Divides a scalar by each element of the vector. + + The scalar to divide. + A new vector that is the division of the vector and the scalar. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. 
+ + The scalar denominator to use. + A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector containing the result. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector. + + The vector to pointwise multiply with this one. + A new vector which is the pointwise multiplication of the two vectors. + If this vector and are not the same size. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector. + + The pointwise denominator vector to use. + A new vector which is the pointwise division of the two vectors. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise division. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The matrix to store the result into. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + The vector to store the result into. + If this vector and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. 
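The preceding entries cover the pointwise arithmetic members of the vector type (multiply, divide, power). A small sketch, assuming the MathNet.Numerics Vector<double> API; the values are chosen only to make the results easy to check by hand.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class PointwiseSketch
    {
        static void Main()
        {
            var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 4.0, 9.0 });
            var v = Vector<double>.Build.DenseOfArray(new[] { 2.0, 2.0, 3.0 });

            var product = u.PointwiseMultiply(v);  // 2, 8, 27
            var quotient = u.PointwiseDivide(v);   // 0.5, 2, 3
            var squared = u.PointwisePower(2.0);   // 1, 16, 81

            Console.WriteLine(product.ToString());
            Console.WriteLine(quotient.ToString());
            Console.WriteLine(squared.ToString());
        }
    }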
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise modulus. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise remainder. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Helper function to apply a unary function to a vector. The function + f modifies the vector given to it in place. Before its + called, a copy of the 'this' vector with the same dimension is + first created, then passed to f. The copy is returned as the result + + Function which takes a vector, modifies it in place and returns void + New instance of vector which is the result + + + + Helper function to apply a unary function which modifies a vector + in place. + + Function which takes a vector, modifies it in place and returns void + The vector where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes a scalar and + a vector and modifies the latter in place. A copy of the "this" + vector is therefore first made and then passed to f together with + the scalar argument. The copy is then returned as the result + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + The resulting vector + + + + Helper function to apply a binary function which takes a scalar and + a vector, modifies the latter in place and returns void. + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the latter in place. A copy of the "this" vector is + first made and then passed to f together with the other vector. The + copy is then returned as the result + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the second one in place + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The vector to store the result. 
+ If this vector and are not the same size. + + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector. + + The other vector + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. 
+ + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = (sum(abs(this[i])^p))^(1/p) + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + The p value. + This vector normalized to a unit vector with respect to the p-norm. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the value of maximum element. + + The value of maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the value of the minimum element. + + The value of the minimum element. + + + + Returns the index of the minimum element. + + The index of minimum element. 
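The norm and extremum members just documented can be exercised as follows; a sketch under the same MathNet.Numerics Vector<double> assumption, with the expected values noted in comments.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class NormSketch
    {
        static void Main()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0, 0.0 });

            Console.WriteLine(v.L1Norm());          // 7  (sum of absolute values)
            Console.WriteLine(v.L2Norm());          // 5  (Euclidean length)
            Console.WriteLine(v.InfinityNorm());    // 4  (largest absolute value)
            Console.WriteLine(v.Norm(3.0));         // general p-norm, here (3^3 + 4^3)^(1/3)

            var unit = v.Normalize(2.0);            // scaled to unit Euclidean length
            Console.WriteLine(unit.L2Norm());       // 1

            Console.WriteLine(v.MaximumIndex());    // 0 (value 3)
            Console.WriteLine(v.MinimumIndex());    // 1 (value -4)
            Console.WriteLine(v.AbsoluteMaximum()); // 4
        }
    }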
+ + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Computes the sum of the absolute value of the vector's elements. + + The sum of the absolute value of the vector's elements. + + + + Indicates whether the current object is equal to another object of the same type. + + An object to compare with this object. + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Creates a new object that is a copy of the current instance. + + + A new object that is a copy of this instance. + + + + + Returns an enumerator that iterates through the collection. + + + A that can be used to iterate through the collection. + + + + + Returns an enumerator that iterates through a collection. + + + An object that can be used to iterate through the collection. + + + + + Returns a string that describes the type, dimensions and shape of this vector. + + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Character to use to print if there is not enough space to print all entries. Typical value: "..". + Character to use to separate two coluns on a line. Typical value: " " (2 spaces). + Character to use to separate two rows/lines. Typical value: Environment.NewLine. + Function to provide a string for any given entry value. + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that represents the content of this vector, column by column. + + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector, column by column and with a type header. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Returns a Vector containing the same values of . + + This method is included for completeness. + The vector to get the values from. + A vector containing the same values as . + If is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. 
+ The result of the addition. + If and are not the same size. + If or is . + + + + Adds a scalar to each element of a vector. + + The vector to add to. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of a vector. + + The scalar value to add. + The vector to add to. + The result of the addition. + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of a vector. + + The vector to subtract from. + The scalar value to subtract. + The result of the subtraction. + If is . + + + + Substracts each element of a vector from a scalar. + + The scalar value to subtract from. + The vector to subtract. + The result of the subtraction. + If is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a scalar with a vector. + + The scalar to divide. + The vector. + The result of the division. + If is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Pointwise divides two Vectors. + + The vector to divide. + The other vector. + The result of the division. + If and are not the same size. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the vector. + + The dividend we want to compute the remainder of. + The vector whose elements we want to use as divisor. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two vectors. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
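The operator entries above describe element-wise addition and subtraction, scalar scaling, and the vector-times-vector dot product. A brief sketch, again assuming the MathNet.Numerics Vector<double> operator overloads; names are illustrative.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class OperatorSketch
    {
        static void Main()
        {
            var a = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var b = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

            var sum = a + b;        // element-wise addition
            var diff = a - b;       // element-wise subtraction
            var scaled = 2.0 * a;   // scalar multiplication
            double dot = a * b;     // vector * vector is the dot product: 32

            Console.WriteLine(sum.ToString());
            Console.WriteLine(diff.ToString());
            Console.WriteLine(scaled.ToString());
            Console.WriteLine(dot);
        }
    }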
+ + + + Computes the sqrt of a vector pointwise + + The input vector + + + + + Computes the exponential of a vector pointwise + + The input vector + + + + + Computes the log of a vector pointwise + + The input vector + + + + + Computes the log10 of a vector pointwise + + The input vector + + + + + Computes the sin of a vector pointwise + + The input vector + + + + + Computes the cos of a vector pointwise + + The input vector + + + + + Computes the tan of a vector pointwise + + The input vector + + + + + Computes the asin of a vector pointwise + + The input vector + + + + + Computes the acos of a vector pointwise + + The input vector + + + + + Computes the atan of a vector pointwise + + The input vector + + + + + Computes the sinh of a vector pointwise + + The input vector + + + + + Computes the cosh of a vector pointwise + + The input vector + + + + + Computes the tanh of a vector pointwise + + The input vector + + + + + Computes the absolute value of a vector pointwise + + The input vector + + + + + Computes the floor of a vector pointwise + + The input vector + + + + + Computes the ceiling of a vector pointwise + + The input vector + + + + + Computes the rounded value of a vector pointwise + + The input vector + + + + + Initializes a new instance of the Vector class. + + + + + Gets the raw vector data storage. + + + + + Gets the length or number of dimensions of this vector. + + + + Gets or sets the value at the given . + The index of the value to get or set. + The value of the vector at the given . + If is negative or + greater than the size of the vector. + + + Gets the value at the given without range checking.. + The index of the value to get or set. + The value of the vector at the given . + + + Sets the at the given without range checking.. + The index of the value to get or set. + The value to set. + + + + Resets all values to zero. + + + + + Sets all values of a subvector to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Returns a deep-copy clone of the vector. + + A deep-copy clone of the vector. + + + + Set the values of this vector to the given values. + + The array containing the values to use. + If is . + If is not the same size as this vector. + + + + Copies the values of this vector into the target vector. + + The vector to copy elements into. + If is . + If is not the same size as this vector. + + + + Creates a vector containing specified elements. + + The first element to begin copying from. + The number of elements to copy. + A vector containing a copy of the specified elements. + If is not positive or + greater than or equal to the size of the vector. + If + is greater than or equal to the size of the vector. + + If is not positive. + + + + Copies the values of a given vector into a region in this vector. + + The field to start copying to + The number of fields to cpy. Must be positive. + The sub-vector to copy from. + If is + + + + Copies the requested elements from this vector to another. + + The vector to copy the elements to. + The element to start copying from. + The element to start copying to. + The number of elements to copy. + + + + Returns the data contained in the vector as an array. + The returned array will be independent from this vector. + A new memory block will be allocated for the array. + + The vector's data as an array. 
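The copy and sub-vector members above (Clone, SubVector, ToArray) all return independent data. A short sketch illustrating that, assuming the MathNet.Numerics Vector<double> API; the index arguments are illustrative.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class SubVectorSketch
    {
        static void Main()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 10.0, 20.0, 30.0, 40.0 });

            var middle = v.SubVector(1, 2);     // copy of the elements 20, 30
            var clone = v.Clone();              // independent deep copy
            double[] raw = v.ToArray();         // independent array copy

            middle[0] = -1.0;                   // does not affect v: SubVector copies
            Console.WriteLine(v[1]);            // still 20
            Console.WriteLine(clone.Equals(v)); // true: same values
            Console.WriteLine(raw.Length);      // 4
        }
    }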
+ + + + Returns the internal array of this vector if, and only if, this vector is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the vector will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Create a matrix based on this vector in column form (one single column). + + + This vector as a column matrix. + + + + + Create a matrix based on this vector in row form (one single row). + + + This vector as a row matrix. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Applies a function to each value of this vector and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value with its result. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). 
The functional helpers continue with Map2, which applies a function to each value pair of two vectors either into a result vector or returning a new one, a fold that updates a status value with each value pair and returns the resulting status, Find and Find2 (the index and value of the first element, or element pair, satisfying a predicate, or null if none is found), and Exists/Exists2 and ForAll/ForAll2 (whether at least one, or every, element or element pair satisfies a predicate; zero elements may be skipped on sparse data structures if allowed). Next come the vector storage types: sparse vector storage exposes the arrays of non-zero indices and non-zero values, the count of non-zero elements, an IsDense flag, unchecked At accessors, the growth amount used when the internal arrays must be enlarged, and GetHashCode; dense vector storage exposes IsDense and unchecked At accessors. The matrix storage contract is introduced as well: IsDense, IsFullyMutable, and whether a specific field can be set at all (false for fixed fields such as an off-diagonal entry of a diagonal matrix).
Matrix storage access follows: a range-checked row/column indexer built on unchecked At getters and setters (the setter is not thread safe, so callers should lock and take care to avoid deadlocks), typed and untyped Equals plus GetHashCode, and the note that state arrays are never modified unless they are the same instance as the target array, which is allowed. The compressed sparse row (CSR) storage is then documented: RowPointers holds, for each row i, the index of that row's first non-zero element, its last entry equals ValueCount, the number of non-zeros in row i is RowPointers[i+1] - RowPointers[i], and the array has length RowCount+1; ColumnIndices records the column of each stored value; Values holds the non-zero elements in row-major order; and a count of the stored non-zero elements is exposed.
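To make the CSR layout concrete, here is a small illustrative matrix and the arrays it would occupy under the scheme described above (built with SparseMatrix.OfArray; the CSR contents in the comments are worked out by hand for this example):

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    class CsrDemo
    {
        static void Main()
        {
            // 3x3 matrix with five non-zero entries.
            var m = SparseMatrix.OfArray(new double[,]
            {
                { 1, 0, 2 },
                { 0, 0, 3 },
                { 4, 5, 0 },
            });

            // Row-major CSR storage of this matrix:
            //   Values        = [ 1, 2, 3, 4, 5 ]
            //   ColumnIndices = [ 0, 2, 2, 0, 1 ]
            //   RowPointers   = [ 0, 2, 3, 5 ]   (RowPointers[i+1] - RowPointers[i] = non-zeros in row i)
            Console.WriteLine("Stored non-zeros: " + m.NonZerosCount);   // 5
        }
    }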
Sparse matrix internals come next: the unchecked At setter, deleting a value from the non-zero storage, finding an item's index in the value array from its matrix row and column, the growth amount for the internal arrays, and GetHashCode (none of these are thread safe). The remaining storage types are covered as well, including a matrix storage with the usual mutability flags and a vector storage with a range-checked indexer, unchecked At accessors and equality members. The factorizations then begin with the QR decomposition by modified Gram-Schmidt orthogonalization: any real square matrix A may be decomposed as A = QR, where Q is an orthogonal m x n matrix and R an n x n upper triangular matrix, computed at construction time; supported data types are double, single and their complex counterparts.
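A hedged sketch of the Gram-Schmidt QR factorization described above, assuming these entries document the GramSchmidt() factory on Matrix<double> and its Q, R and Solve members:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class GramSchmidtDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 3, 1 },
                { 1, 2 },
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 9.0, 8.0 });

            var qr = a.GramSchmidt();        // A = Q*R by modified Gram-Schmidt
            var x = qr.Solve(b);             // solve A*x = b via the factorization

            Console.WriteLine(qr.Q * qr.R);  // reproduces A up to rounding
            Console.WriteLine(x);            // [2, 3]
        }
    }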
The eigenvalue decomposition is documented next. If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is diagonal and the eigenvector matrix V is orthogonal (V*VT = I); if A is not symmetric, D is block diagonal, with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, lambda + i*mu, in 2-by-2 blocks [lambda, mu; -mu, lambda]. The columns of V represent the eigenvectors in the sense that A*V = V*D, i.e. A.Multiply(V) equals V.Multiply(D); V may be badly conditioned or even singular, so the validity of A = V*D*Inverse(V) depends on the condition of V. Documented members: whether the matrix is symmetric, the absolute value of the determinant, the effective numerical rank, whether the matrix is full rank, the eigenvalues in ascending order, the eigenvectors, the block diagonal eigenvalue matrix, and Solve overloads for AX = B and Ax = b. The Cholesky factorization follows: for a symmetric, positive definite A it yields a lower triangular L with A = L*L', computed at construction time (the constructor throws if A is not symmetric or not positive definite), and it exposes the lower triangular factor, the determinant and log determinant, and Solve overloads. The LU factorization closes the group: A = L*U with lower triangular L and upper triangular U, where the Math.NET implementation also stores pivot elements encoding a permutation matrix P such that P*A = L*U, and it exposes the lower and upper triangular factors, the permutation, the determinant, Solve overloads for matrix and vector right-hand sides, and an Inverse computed via the LU decomposition.
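A brief sketch of solving a symmetric positive-definite system with the Cholesky factorization, and the same system via LU, matching the Solve members listed above (the numbers are illustrative only):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class FactorizationSolveDemo
    {
        static void Main()
        {
            // Symmetric positive-definite A and right-hand side b.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1 },
                { 1, 3 },
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

            var x1 = a.Cholesky().Solve(b);   // A = L*L'; throws if A is not SPD
            var x2 = a.LU().Solve(b);         // P*A = L*U with pivoting

            Console.WriteLine(x1);            // ~ [0.0909, 0.6364]
            Console.WriteLine(x2);            // same solution
        }
    }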
The QR section documents the QRMethod enum (compute the full or the thin QR factorization) and the Householder-based QR decomposition: any real m x n matrix A may be decomposed as A = QR, where Q is an orthogonal matrix (its columns are orthogonal unit vectors, so QTQ = I) and R is an upper (right) triangular matrix, computed at construction time; a full factorization yields an m x m Q and an m x n R, a thin factorization an m x n Q and an n x n R. Members are the orthogonal Q, the upper triangular R, the absolute determinant value, whether the matrix is full rank, and Solve overloads for matrix and vector right-hand sides. The singular value decomposition follows: any m-by-n real matrix M factors as M = UΣVT, where U is an m-by-m unitary matrix, Σ is an m-by-n diagonal matrix of non-negative singular values (by convention in descending order, and uniquely determined by M even though U and V are not), and VT is the transpose of an n-by-n unitary matrix V. Members include a flag indicating whether U and VT were computed during factorization, the singular values, the left singular vectors U, the transposed right singular vectors VT, and the singular values returned as a diagonal matrix.
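For an overdetermined system, both the QR and the SVD Solve members return the least-squares solution. A sketch under the assumption that QRMethod lives in the factorization namespace and that Svd(true) requests the singular vectors so that Solve is available:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Factorization;

    class LeastSquaresDemo
    {
        static void Main()
        {
            // Fit y = c0 + c1*x through (1,1), (2,2), (3,2) in the least-squares sense.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 1 },
                { 1, 2 },
                { 1, 3 },
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 2.0 });

            var xQr  = a.QR(QRMethod.Thin).Solve(b);   // Householder QR, thin variant
            var xSvd = a.Svd(true).Solve(b);           // SVD; more robust when ill-conditioned

            Console.WriteLine(xQr);    // ~ [0.667, 0.5]
            Console.WriteLine(xSvd);   // same least-squares solution
        }
    }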
The SVD members conclude with the effective numerical rank (the number of non-negligible singular values), the 2-norm, the condition number max(S)/min(S), the determinant of the square matrix for which the SVD was computed, and Solve overloads; a general solver contract with Solve methods for matrix and vector right-hand sides is documented alongside. The least-squares regression helpers come next: DirectMethod overloads find the model parameters β such that X*β, with predictor X, becomes as close to the response Y as possible in the least-squares sense, accepting a predictor matrix with a response vector or matrix, a list of predictor arrays with their responses, or a sequence of predictor/response samples, each taking the direct method to use and optionally adding an intercept as the first artificial predictor (default false). The NormalEquations overloads do the same using the Cholesky decomposition of the normal equations, for a predictor matrix with a response vector or matrix.
Further NormalEquations overloads cover lists of predictor arrays and sequences of predictor/response samples, again with an optional intercept. The QR-based overloads use an orthogonal decomposition and are therefore more numerically stable than the normal equations but also slower, and the Svd-based overloads use a singular value decomposition and are more stable still (especially on ill-conditioned problems) but slower again; both families accept a predictor matrix with a response vector or matrix, lists of predictor arrays, or predictor/response samples. Weighted linear regression via the normal equations takes a weight matrix W, usually diagonal with an entry per predictor row, or a list of per-sample weights, with an optional intercept, and locally-weighted variants are provided as well. Finally, Fit.Line performs a least-squares fit of the points (x, y) to a line y = a + b*x, returning the best-fitting parameters as an (a, b) tuple where a is the intercept and b the slope, given separate predictor and response arrays.
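A minimal sketch of the straight-line fit just described (in the 3.x API, Fit.Line returns a Tuple<double, double> of intercept and slope; the sample numbers are made up):

    using System;
    using MathNet.Numerics;

    class FitDemo
    {
        static void Main()
        {
            double[] x = { 1, 2, 3, 4 };
            double[] y = { 3.1, 4.9, 7.2, 8.9 };

            // Least-squares line y = a + b*x; Item1 is the intercept a, Item2 the slope b.
            Tuple<double, double> p = Fit.Line(x, y);

            Console.WriteLine("a = {0:F3}, b = {1:F3}", p.Item1, p.Item2);
        }
    }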
Fit.Line is also documented for predictor/response samples given as tuples. The ODE solvers follow: Adams-Bashforth methods of first order (the same as forward Euler), second, third and fourth order, each taking the initial value, the start time, the end time, the size of the output array (the larger, the finer) and the ODE model, and returning an approximation array of size N; an overview of the ODE solver algorithms; and second- and fourth-order Runge-Kutta methods for a single ODE plus second- and fourth-order Runge-Kutta variants for ODE systems, which take an initial vector instead of a scalar initial value.
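A sketch of the fourth-order Runge-Kutta solver for a single ODE, assuming the parameter order documented above (initial value, start time, end time, number of output points, right-hand side f(t, y)) and the RungeKutta class in the OdeSolvers namespace:

    using System;
    using MathNet.Numerics.OdeSolvers;

    class OdeDemo
    {
        static void Main()
        {
            // dy/dt = -y with y(0) = 1, integrated over [0, 5] with 100 output points.
            Func<double, double, double> f = (t, y) => -y;

            double[] approx = RungeKutta.FourthOrder(1.0, 0.0, 5.0, 100, f);

            Console.WriteLine(approx[approx.Length - 1]);   // ~ exp(-5) ≈ 0.0067
        }
    }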
The Precision utilities open with general helpers for working with floating point numbers, referencing Goldberg's "What every computer scientist should know about floating-point arithmetic" and the Wikipedia definition of machine epsilon, and a CompareTo family that returns -1, 0 or +1 depending on whether the first double is smaller than, almost equal to, or larger than the second, with overloads for an absolute accuracy, a number of decimal places (must be 1 or larger), a relative accuracy, and a maximum difference in units in the last place (ulps). IsLarger and IsSmaller overloads mirror these, deciding whether the first value is larger (or smaller) than the second within a number of decimal places, an absolute accuracy, a relative accuracy, or a ulps tolerance based on the binary representation; for the decimal-places variants, two numbers count as equal when their difference is smaller than 10^(-decimalPlaces), divided by two so that the matching range sits symmetrically around each number (for example, with 2 decimal places, 0.01 equals values between 0.005 and 0.015, but not 0.02 and not 0.00). The AlmostEqual family then covers equality within a specified maximum absolute error, within a maximum (relative) error, variants for Complex values, and norm-based variants that take the norms of the two values and of their difference; the overloads without an explicit tolerance treat values as equal when they differ by no more than 10 * 2^(-52). Decimal-places overloads exist both as an absolute measure (difference smaller than 0.5 * 10^(-decimalPlaces)) and as a relative measure that falls back to an absolute comparison for numbers very close to zero, throwing if the number of decimal places is negative. The ulps-based comparison counts the discrete floating point steps between two values (following the Cygnus "comparing floats" technique, with a note on porting the C code to .NET without pointers or unsafe code) and requires a tolerance of 1 or larger, with a float overload as well. Finally, list overloads compare two lists of doubles element by element within a maximum error or to a number of decimal places.
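A small sketch of the scalar AlmostEqual overloads summarized above (these are assumed to be the extension methods on the Precision class, with one overload taking a maximum absolute error and one taking a number of decimal places):

    using System;
    using MathNet.Numerics;

    class AlmostEqualDemo
    {
        static void Main()
        {
            double a = 0.1 + 0.2;
            double b = 0.3;

            Console.WriteLine(a == b);                    // False: binary rounding error
            Console.WriteLine(a.AlmostEqual(b, 1e-12));   // True: within an absolute error of 1e-12
            Console.WriteLine(a.AlmostEqual(b, 10));      // True: equal to 10 decimal places
        }
    }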
Further comparison overloads check two lists of doubles to a number of decimal places or within a maximum error, and two vectors or two matrices for equality within a maximum error, to a number of decimal places as an absolute measure, or to a number of decimal places with a relative comparison that falls back to an absolute one near zero. The precision constants follow: the number of binary digits in the significand of double- and single-precision floating point numbers; the standard double- and single-precision epsilons both per the Demmel definition (used by LAPACK and Scilab) and per the Higham definition (used by the ISO C standard and MATLAB); the actual machine epsilons, i.e. the smallest number that can be subtracted from, or added to, 1 and still change the result (unit roundoff); the number of significant decimal places of double- and single-precision numbers; and the default accuracies 10 * 2^(-53) = 1.11022302462516E-15 and 10 * 2^(-24) = 5.96046447753906E-07. Helper functions return the magnitude of a number, the number divided by its magnitude (a value between -10 and 10), "directional" long and int equivalents of a double or float, and Increment and Decrement, which step a floating point value to the next larger or smaller representable number a given number of times (incrementing double.MaxValue returns positive infinity, decrementing double.MinValue returns negative infinity).
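A sketch of the epsilon constants and the Increment/Decrement helpers (Precision.DoublePrecision and Precision.PositiveDoublePrecision are assumed to carry the Demmel and Higham values described above; the step count is passed explicitly):

    using System;
    using MathNet.Numerics;

    class EpsilonDemo
    {
        static void Main()
        {
            // IEEE 754 double-precision epsilons as exposed by the library.
            Console.WriteLine(Precision.DoublePrecision);           // 2^-53 (Demmel / LAPACK)
            Console.WriteLine(Precision.PositiveDoublePrecision);   // 2^-52 (Higham / ISO C, MATLAB)

            // Step to the adjacent representable doubles around 1.0.
            double x = 1.0;
            Console.WriteLine(x.Increment(1));   // next double after 1.0
            Console.WriteLine(x.Decrement(1));   // previous double before 1.0
        }
    }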
+ + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. 
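Editor's note: the "directional long", the count of numbers between two doubles, and CoerceZero described above fit into a compact sketch. The names are illustrative and the code deliberately skips the NaN/infinity guards the documented members provide.

```csharp
using System;

static class UlpSketch
{
    // Map a double to a long that preserves ordering: negative doubles map to
    // negative longs that grow more negative as the double does.
    public static long DirectionalLong(double value)
    {
        long bits = BitConverter.DoubleToInt64Bits(value);
        return bits >= 0 ? bits : long.MinValue - bits;
    }

    // Count of representable doubles between a and b: 0 for equal values,
    // 1 for immediate neighbours. No NaN/infinity handling in this sketch.
    public static long NumbersBetween(double a, double b)
    {
        return Math.Abs(DirectionalLong(a) - DirectionalLong(b));
    }

    // Force a value to zero when its magnitude is below an absolute threshold.
    public static double CoerceZero(double value, double threshold)
    {
        if (threshold < 0) throw new ArgumentOutOfRangeException(nameof(threshold));
        return Math.Abs(value) < threshold ? 0.0 : value;
    }
}
```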
+ + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized string similar to The accuracy couldn't be reached with the specified number of iterations.. + + + + + Looks up a localized string similar to The array arguments must have the same length.. + + + + + Looks up a localized string similar to The given array has the wrong length. Should be {0}.. + + + + + Looks up a localized string similar to The argument must be between 0 and 1.. + + + + + Looks up a localized string similar to Value cannot be in the range -1 < x < 1.. + + + + + Looks up a localized string similar to Value must be even.. + + + + + Looks up a localized string similar to The histogram does not contain the value.. + + + + + Looks up a localized string similar to Value is expected to be between {0} and {1} (including {0} and {1}).. + + + + + Looks up a localized string similar to At least one item of {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be greater than or equal to one.. + + + + + Looks up a localized string similar to Matrix dimensions must agree.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: {0}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}, op3 is {2}.. + + + + + Looks up a localized string similar to The requested matrix does not exist.. + + + + + Looks up a localized string similar to The matrix indices must not be out of range of the given matrix.. + + + + + Looks up a localized string similar to Matrix must not be rank deficient.. + + + + + Looks up a localized string similar to Matrix must not be singular.. + + + + + Looks up a localized string similar to Matrix must be positive definite.. + + + + + Looks up a localized string similar to Matrix column dimensions must agree.. + + + + + Looks up a localized string similar to Matrix row dimensions must agree.. + + + + + Looks up a localized string similar to Matrix must have exactly one column.. + + + + + Looks up a localized string similar to Matrix must have exactly one column and row, thus have only one cell.. + + + + + Looks up a localized string similar to Matrix must have exactly one row.. 
+ + + + + Looks up a localized string similar to Matrix must be square.. + + + + + Looks up a localized string similar to Matrix must be symmetric.. + + + + + Looks up a localized string similar to Matrix must be symmetric positive definite.. + + + + + Looks up a localized string similar to In the specified range, the exclusive maximum must be greater than the inclusive minimum.. + + + + + Looks up a localized string similar to In the specified range, the minimum is greater than maximum.. + + + + + Looks up a localized string similar to Value must be positive.. + + + + + Looks up a localized string similar to Value must neither be infinite nor NaN.. + + + + + Looks up a localized string similar to Value must not be negative (zero is ok).. + + + + + Looks up a localized string similar to {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be odd.. + + + + + Looks up a localized string similar to {0} must be greater than {1}.. + + + + + Looks up a localized string similar to {0} must be greater than or equal to {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than or equal to {1}.. + + + + + Looks up a localized string similar to The chosen parameter set is invalid (probably some value is out of range).. + + + + + Looks up a localized string similar to The given expression does not represent a complex number.. + + + + + Looks up a localized string similar to Value must be positive (and not zero).. + + + + + Looks up a localized string similar to Size must be a Power of Two.. + + + + + Looks up a localized string similar to Size must be a Power of Two in every dimension.. + + + + + Looks up a localized string similar to The range between {0} and {1} must be less than or equal to {2}.. + + + + + Looks up a localized string similar to Arguments must be different objects.. + + + + + Looks up a localized string similar to Array must have exactly one dimension (and not be null).. + + + + + Looks up a localized string similar to Value is too large.. + + + + + Looks up a localized string similar to Value is too large for the current iteration limit.. + + + + + Looks up a localized string similar to Type mismatch.. + + + + + Looks up a localized string similar to The upper bound must be strictly larger than the lower bound.. + + + + + Looks up a localized string similar to The upper bound must be at least as large as the lower bound.. + + + + + Looks up a localized string similar to Array length must be a multiple of {0}.. + + + + + Looks up a localized string similar to All vectors must have the same dimensionality.. + + + + + Looks up a localized string similar to The vector must have 3 dimensions.. + + + + + Looks up a localized string similar to The given array is too small. It must be at least {0} long.. + + + + + Looks up a localized string similar to Big endian files are not supported.. + + + + + Looks up a localized string similar to The supplied collection is empty.. + + + + + Looks up a localized string similar to Complex matrices are not supported.. + + + + + Looks up a localized string similar to An algorithm failed to converge.. + + + + + Looks up a localized string similar to The sample size must be larger than the given degrees of freedom.. + + + + + Looks up a localized string similar to This feature is not implemented yet (but is planned).. + + + + + Looks up a localized string similar to The given file doesn't exist.. 
+ + + + + Looks up a localized string similar to Sample points should be sorted in strictly ascending order. + + + + + Looks up a localized string similar to All sample points should be unique.. + + + + + Looks up a localized string similar to Invalid parameterization for the distribution.. + + + + + Looks up a localized string similar to Invalid Left Boundary Condition.. + + + + + Looks up a localized string similar to The operation could not be performed because the accumulator is empty.. + + + + + Looks up a localized string similar to The operation could not be performed because the histogram is empty.. + + + + + Looks up a localized string similar to Not enough points in the distribution.. + + + + + Looks up a localized string similar to No Samples Provided. Preparation Required.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method, parameter number : {0}. + + + + + Looks up a localized string similar to Invalid Right Boundary Condition.. + + + + + Looks up a localized string similar to Lag must be positive. + + + + + Looks up a localized string similar to Lag must be smaller than the sample size. + + + + + Looks up a localized string similar to ddd MMM dd HH:mm:ss yyyy. + + + + + Looks up a localized string similar to Matrices can not be empty and must have at least one row and column.. + + + + + Looks up a localized string similar to The number of columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Matrix must be in sparse storage format. + + + + + Looks up a localized string similar to The number of rows of a matrix must be positive.. + + + + + Looks up a localized string similar to The number of rows or columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Unable to allocate native memory.. + + + + + Looks up a localized string similar to Only 1 and 2 dimensional arrays are supported.. + + + + + Looks up a localized string similar to Data must contain at least {0} values.. + + + + + Looks up a localized string similar to Name cannot contain a space. name: {0}. + + + + + Looks up a localized string similar to {0} is not a supported type.. + + + + + Looks up a localized string similar to Algorithm experience a numerical break down + . + + + + + Looks up a localized string similar to The two arguments can't be compared (maybe they are part of a partial ordering?). + + + + + Looks up a localized string similar to The integer array does not represent a valid permutation.. + + + + + Looks up a localized string similar to The sampler's proposal distribution is not upper bounding the target density.. + + + + + Looks up a localized string similar to A regression of the requested order requires at least {0} samples. Only {1} samples have been provided. . + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds.. + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds. Consider to use RobustNewtonRaphson instead.. + + + + + Looks up a localized string similar to The lower and upper bounds must bracket a single root.. + + + + + Looks up a localized string similar to The algorithm ended without root in the range.. 
+ + + + + Looks up a localized string similar to The number of rows must greater than or equal to the number of columns.. + + + + + Looks up a localized string similar to All sample vectors must have the same length. However, vectors with disagreeing length {0} and {1} have been provided. A sample with index i is given by the value at index i of each provided vector.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed. The {0}-th diagonal element of the factor U is zero.. + + + + + Looks up a localized string similar to The singular vectors were not computed.. + + + + + Looks up a localized string similar to This special case is not supported yet (but is planned).. + + + + + Looks up a localized string similar to The given stop criterion already exist in the collection.. + + + + + Looks up a localized string similar to There is no stop criterion in the collection.. + + + + + Looks up a localized string similar to String parameter cannot be empty or null.. + + + + + Looks up a localized string similar to We only support sparse matrix with less than int.MaxValue elements.. + + + + + Looks up a localized string similar to The moment of the distribution is undefined.. + + + + + Looks up a localized string similar to A user defined provider has not been specified.. + + + + + Looks up a localized string similar to User work buffers are not supported by this provider.. + + + + + Looks up a localized string similar to Vectors can not be empty and must have at least one element.. + + + + + Looks up a localized string similar to The given work array is too small. Check work[0] for the corret size.. + + + + + P/Invoke methods to the native math libraries. + + + + + Name of the native DLL. + + + + + Frees the memory allocated to the MKL memory pool. + + + + + Frees the memory allocated to the MKL memory pool on the current thread. + + + + + Disable the MKL memory pool. May impact performance. + + + + + Retrieves information about the MKL memory pool. + + On output, returns the number of memory buffers allocated. + Returns the number of bytes allocated to all memory buffers. + + + + Enable gathering of peak memory statistics of the MKL memory pool. + + + + + Disable gathering of peak memory statistics of the MKL memory pool. + + + + + Measures peak memory usage of the MKL memory pool. + + Whether the usage counter should be reset. + The peak number of bytes allocated to all memory buffers. + + + + Disable gathering memory usage + + + + + Enable gathering memory usage + + + + + Return peak memory usage + + + + + Return peak memory usage and reset counter + + + + + Consistency vs. performance trade-off between runs on different machines. + + + + Consistent on the same CPU only (maximum performance) + + + Consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility) + + + Consistent on Intel CPUs supporting SSE2 or later + + + Consistent on Intel CPUs supporting SSE4.2 or later + + + Consistent on Intel CPUs supporting AVX or later + + + Consistent on Intel CPUs supporting AVX2 or later + + + + P/Invoke methods to the native math libraries. + + + + + Name of the native DLL. + + + + + P/Invoke methods to the native math libraries. + + + + + Name of the native DLL. + + + + + Helper class to load native libraries depending on the architecture of the OS and process. 
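Editor's note: the native provider entry points listed above are reached through P/Invoke. As a minimal illustration of that pattern only — the DLL name and export below are hypothetical placeholders, not the actual MKL/OpenBLAS/CUDA entry points the providers ship:

```csharp
using System.Runtime.InteropServices;

static class NativeExample
{
    // Hypothetical native library name; the real loader resolves an
    // architecture-specific binary at runtime.
    const string DllName = "ExampleNativeMath";

    // Hypothetical export showing the calling convention and marshalling
    // typically used for BLAS-style routines (arrays passed as raw buffers).
    [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
    internal static extern double example_dot(int n, double[] x, double[] y);
}
```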
+ + + + + Dictionary of handles to previously loaded libraries, + + + + + Gets a string indicating the architecture and bitness of the current process. + + + + + If the last native library failed to load then gets the corresponding exception + which occurred or null if the library was successfully loaded. + + + + + Load the native library with the given filename. + + The file name of the library to load. + True if the library was successfully loaded or if it has already been loaded. + + + + Try to load a native library by providing its name and a directory. + Tries to load an implementation suitable for the current CPU architecture + and process mode if there is a matching subfolder. + + True if the library was successfully loaded or if it has already been loaded. + + + + Try to load a native library by providing the full path including the file name of the library. + + True if the library was successfully loaded or if it has already been loaded. + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Try to use a native provider, if available. + + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsFFTProvider" environment variable, + or fall back to the best provider. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Frees the memory allocated to the MKL memory pool. + + + + + Frees the memory allocated to the MKL memory pool on the current thread. + + + + + Disable the MKL memory pool. May impact performance. + + + + + Retrieves information about the MKL memory pool. + + On output, returns the number of memory buffers allocated. + Returns the number of bytes allocated to all memory buffers. + + + + Enable gathering of peak memory statistics of the MKL memory pool. + + + + + Disable gathering of peak memory statistics of the MKL memory pool. + + + + + Measures peak memory usage of the MKL memory pool. + + Whether the usage counter should be reset. + The peak number of bytes allocated to all memory buffers. + + + + NVidia's CUDA Toolkit linear algebra provider. + + + NVidia's CUDA Toolkit linear algebra provider. + + + NVidia's CUDA Toolkit linear algebra provider. + + + NVidia's CUDA Toolkit linear algebra provider. + + + NVidia's CUDA Toolkit linear algebra provider. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. 
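Editor's note: the probe-and-fall-back behaviour described above (try a native provider, otherwise stay managed) is driven from the `Control` class. A short usage sketch, assuming the MathNet.Numerics 3.x `Control` API:

```csharp
using System;
using MathNet.Numerics;

class ProviderSelection
{
    static void Main()
    {
        // Try the native MKL provider; if its binaries are missing the call
        // returns false and we stay on the managed provider.
        if (!Control.TryUseNativeMKL())
        {
            Control.UseManaged();
        }

        Console.WriteLine(Control.LinearAlgebraProvider);
    }
}
```

As the documentation above notes, a specific provider can also be pinned through an environment variable (for example the "MathNetNumericsFFTProvider" / "MathNetNumericsLAProvider" variables) instead of calling these methods explicitly.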
+ The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to Complex.One and beta set to Complex.Zero, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always Complex.One + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. 
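Editor's note: the "multiplies two matrices and updates another" contract above is the familiar GEMM update c = alpha*op(a)*op(b) + beta*c. A naive managed sketch of the non-transposed case, assuming column-major storage for the flat arrays and illustrative parameter names:

```csharp
using System;

static class GemmSketch
{
    // c = alpha * a * b + beta * c, with a (m x k), b (k x n), c (m x n),
    // all stored column-major in flat arrays. No transposition handling here.
    public static void MultiplyWithUpdate(
        double alpha, double[] a, int m, int k,
        double[] b, int n, double beta, double[] c)
    {
        for (int j = 0; j < n; j++)
        {
            for (int i = 0; i < m; i++)
            {
                double sum = 0.0;
                for (int p = 0; p < k; p++)
                {
                    sum += a[p * m + i] * b[j * k + p];
                }
                c[j * m + i] = alpha * sum + beta * c[j * m + i];
            }
        }
    }
}
```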
+ On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to Complex32.One and beta set to Complex32.Zero, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always Complex32.One + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. 
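Editor's note: the DOT and AXPY descriptions repeated above for each numeric type reduce to two short loops over flat arrays. A plain managed sketch with illustrative names:

```csharp
using System;

static class Level1Sketch
{
    // Dot product of x and y (DOT).
    public static double Dot(double[] x, double[] y)
    {
        if (x.Length != y.Length)
            throw new ArgumentException("The array arguments must have the same length.");
        double sum = 0.0;
        for (int i = 0; i < x.Length; i++) sum += x[i] * y[i];
        return sum;
    }

    // result = y + alpha * x (AXPY).
    public static void AddScaled(double alpha, double[] x, double[] y, double[] result)
    {
        for (int i = 0; i < y.Length; i++) result[i] = y[i] + alpha * x[i];
    }
}
```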
On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. + If calling this method fails, consider to fall back to alternatives like the managed provider. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). 
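Editor's note: to make the POTRF/POTRS contract concrete (factor a symmetric positive definite A in place, then solve by forward and back substitution), here is a small managed sketch on a column-major flat array. It is illustrative only and omits the checks a real provider performs.

```csharp
using System;

static class CholeskySketch
{
    // Overwrites the lower triangle of the column-major n x n array a with L,
    // where A = L * L^T. Throws if A is not positive definite.
    public static void Factor(double[] a, int n)
    {
        for (int j = 0; j < n; j++)
        {
            double d = a[j * n + j];
            for (int k = 0; k < j; k++) d -= a[k * n + j] * a[k * n + j];
            if (d <= 0.0) throw new ArgumentException("Matrix must be positive definite.");
            a[j * n + j] = Math.Sqrt(d);

            for (int i = j + 1; i < n; i++)
            {
                double s = a[j * n + i];
                for (int k = 0; k < j; k++) s -= a[k * n + i] * a[k * n + j];
                a[j * n + i] = s / a[j * n + j];
            }
        }
    }

    // Solves A x = b in place for one right-hand side, given the factor L.
    public static void Solve(double[] a, int n, double[] b)
    {
        for (int i = 0; i < n; i++)             // forward: L y = b
        {
            double s = b[i];
            for (int k = 0; k < i; k++) s -= a[k * n + i] * b[k];
            b[i] = s / a[i * n + i];
        }
        for (int i = n - 1; i >= 0; i--)        // backward: L^T x = y
        {
            double s = b[i];
            for (int k = i + 1; k < n; k++) s -= a[i * n + k] * b[k];
            b[i] = s / a[i * n + i];
        }
    }
}
```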
The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. 
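Editor's note: a compact managed sketch of the GETRF/GETRS semantics described above — factor P*A = L*U in place on a column-major array with partial pivoting, record the pivot indices, then solve by applying the permutation followed by forward and back substitution. Illustrative only; a real provider defers to LAPACK.

```csharp
using System;

static class LuSketch
{
    // In-place LU factorization with partial pivoting of a column-major
    // order x order array; ipiv[j] records the pivot row chosen at column j.
    public static void Factor(double[] a, int order, int[] ipiv)
    {
        for (int j = 0; j < order; j++)
        {
            int pivot = j;
            for (int i = j + 1; i < order; i++)
                if (Math.Abs(a[j * order + i]) > Math.Abs(a[j * order + pivot])) pivot = i;
            ipiv[j] = pivot;
            if (a[j * order + pivot] == 0.0) throw new ArgumentException("Matrix must not be singular.");

            if (pivot != j)
                for (int k = 0; k < order; k++)   // swap rows j and pivot
                    (a[k * order + j], a[k * order + pivot]) = (a[k * order + pivot], a[k * order + j]);

            for (int i = j + 1; i < order; i++)
            {
                double m = a[j * order + i] /= a[j * order + j];   // L multiplier, unit diagonal implied
                for (int k = j + 1; k < order; k++)
                    a[k * order + i] -= m * a[k * order + j];
            }
        }
    }

    // Solves A x = b for one right-hand side, given the output of Factor.
    public static void Solve(double[] a, int order, int[] ipiv, double[] b)
    {
        for (int j = 0; j < order; j++)           // apply the permutation P
            if (ipiv[j] != j) (b[j], b[ipiv[j]]) = (b[ipiv[j]], b[j]);

        for (int i = 1; i < order; i++)           // forward: L y = P b
            for (int k = 0; k < i; k++) b[i] -= a[k * order + i] * b[k];

        for (int i = order - 1; i >= 0; i--)      // backward: U x = y
        {
            for (int k = i + 1; k < order; k++) b[i] -= a[k * order + i] * b[k];
            b[i] /= a[i * order + i];
        }
    }
}
```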
+ The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0f and beta set to 0.0f, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0f + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. 
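Editor's note: in everyday use these provider routines are not called directly; the high-level MathNet.Numerics.LinearAlgebra types dispatch to whichever provider is active. A short usage sketch, assuming the 3.x API:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class SolveExample
{
    static void Main()
    {
        // A small symmetric positive definite system solved two ways; both
        // calls end up in the active provider's LU/Cholesky routines.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0 },
            { 1.0, 3.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

        var xLu = a.LU().Solve(b);
        var xChol = a.Cholesky().Solve(b);

        Console.WriteLine(xLu);
        Console.WriteLine(xChol);
    }
}
```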
+ The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Try to use a native provider, if available. + + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsLAProvider" environment variable, + or fall back to the best provider. + + + + + Error codes return from the native OpenBLAS provider. + + + + + Unable to allocate memory. + + + + + OpenBLAS linear algebra provider. + + + OpenBLAS linear algebra provider. + + + OpenBLAS linear algebra provider. + + + OpenBLAS linear algebra provider. + + + OpenBLAS linear algebra provider. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. + If not, fall back to alternatives like the managed provider + + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows in the matrix. + The number of columns in the matrix. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to Complex.One and beta set to Complex.Zero, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always Complex.One + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. 
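Editor's note: the "computes the requested norm of the matrix" routine above typically covers the usual one-norm, infinity-norm and Frobenius norm choices. A managed sketch over a column-major array, with an illustrative enum standing in for the provider's norm selector:

```csharp
using System;

enum SketchNorm { OneNorm, InfinityNorm, FrobeniusNorm }   // illustrative selector

static class NormSketch
{
    // Computes the requested norm of a rows x columns column-major matrix.
    public static double MatrixNorm(SketchNorm norm, int rows, int columns, double[] matrix)
    {
        double result = 0.0;
        switch (norm)
        {
            case SketchNorm.OneNorm:                 // maximum absolute column sum
                for (int j = 0; j < columns; j++)
                {
                    double sum = 0.0;
                    for (int i = 0; i < rows; i++) sum += Math.Abs(matrix[j * rows + i]);
                    result = Math.Max(result, sum);
                }
                break;
            case SketchNorm.InfinityNorm:            // maximum absolute row sum
                for (int i = 0; i < rows; i++)
                {
                    double sum = 0.0;
                    for (int j = 0; j < columns; j++) sum += Math.Abs(matrix[j * rows + i]);
                    result = Math.Max(result, sum);
                }
                break;
            case SketchNorm.FrobeniusNorm:           // square root of the sum of squares
            {
                double sq = 0.0;
                foreach (double v in matrix) sq += v * v;
                result = Math.Sqrt(sq);
                break;
            }
        }
        return result;
    }
}
```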
Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. 
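Editor's note: the thin QR description above (M > N, Q is M by N, R is N by N) maps onto a classical Gram-Schmidt sketch, and the least-squares solve then reduces to back-substituting R x = Q^T b. This illustrative managed version uses 2-D arrays rather than the provider's flat buffers, skips the Householder machinery a real GEQRF uses, and assumes full column rank.

```csharp
using System;

static class QrSketch
{
    // Thin QR of an m x n matrix a (m >= n) by modified Gram-Schmidt:
    // a is overwritten with Q (m x n), r receives the upper-triangular R (n x n).
    public static void ThinQr(double[,] a, double[,] r)
    {
        int m = a.GetLength(0), n = a.GetLength(1);
        for (int j = 0; j < n; j++)
        {
            for (int k = 0; k < j; k++)
            {
                double dot = 0.0;
                for (int i = 0; i < m; i++) dot += a[i, k] * a[i, j];
                r[k, j] = dot;
                for (int i = 0; i < m; i++) a[i, j] -= dot * a[i, k];
            }
            double norm = 0.0;
            for (int i = 0; i < m; i++) norm += a[i, j] * a[i, j];
            r[j, j] = Math.Sqrt(norm);
            for (int i = 0; i < m; i++) a[i, j] /= r[j, j];
        }
    }

    // Least-squares solve of the original A x = b using Q and R from ThinQr.
    public static double[] Solve(double[,] q, double[,] r, double[] b)
    {
        int m = q.GetLength(0), n = q.GetLength(1);
        var x = new double[n];
        for (int j = 0; j < n; j++)                       // x = Q^T b
            for (int i = 0; i < m; i++) x[j] += q[i, j] * b[i];
        for (int j = n - 1; j >= 0; j--)                  // back-substitute R x = Q^T b
        {
            for (int k = j + 1; k < n; k++) x[j] -= r[j, k] * x[k];
            x[j] /= r[j, j];
        }
        return x;
    }
}
```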
+ The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows in the matrix. + The number of columns in the matrix. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to Complex32.One and beta set to Complex32.Zero, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always Complex32.One + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. 
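Editor's note: for the symmetric case of the eigenvalue routine documented above, a classic cyclic Jacobi iteration is a compact way to see what "eigenvalues in the diagonal, eigenvectors accumulated in a second matrix" means. This is purely an illustrative managed sketch (2-D arrays, fixed tolerances), not the LAPACK-grade algorithm the providers use.

```csharp
using System;

static class JacobiEvdSketch
{
    // Cyclic Jacobi iteration for a symmetric n x n matrix a (modified in place).
    // On return the diagonal of a holds the eigenvalues and the columns of v
    // the corresponding eigenvectors.
    public static void SymmetricEvd(double[,] a, double[,] v)
    {
        int n = a.GetLength(0);
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++) v[i, j] = i == j ? 1.0 : 0.0;

        for (int sweep = 0; sweep < 50; sweep++)
        {
            double off = 0.0;                              // size of the off-diagonal part
            for (int p = 0; p < n; p++)
                for (int q = p + 1; q < n; q++) off += a[p, q] * a[p, q];
            if (off < 1e-30) return;                       // converged

            for (int p = 0; p < n; p++)
            for (int q = p + 1; q < n; q++)
            {
                if (Math.Abs(a[p, q]) < 1e-30) continue;
                double theta = (a[q, q] - a[p, p]) / (2.0 * a[p, q]);
                double t = Math.Sign(theta) / (Math.Abs(theta) + Math.Sqrt(theta * theta + 1.0));
                if (theta == 0.0) t = 1.0;
                double c = 1.0 / Math.Sqrt(t * t + 1.0), s = t * c;

                for (int k = 0; k < n; k++)                // A <- A * J
                {
                    double akp = a[k, p], akq = a[k, q];
                    a[k, p] = c * akp - s * akq;
                    a[k, q] = s * akp + c * akq;
                }
                for (int k = 0; k < n; k++)                // A <- J^T * A
                {
                    double apk = a[p, k], aqk = a[q, k];
                    a[p, k] = c * apk - s * aqk;
                    a[q, k] = s * apk + c * aqk;
                }
                for (int k = 0; k < n; k++)                // accumulate eigenvectors V <- V * J
                {
                    double vkp = v[k, p], vkq = v[k, q];
                    v[k, p] = c * vkp - s * vkq;
                    v[k, q] = s * vkp + c * vkq;
                }
            }
        }
    }
}
```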
+ The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. 
[MathNet.Numerics XML documentation (IntelliSense file shipped with the package), continued: member summaries for the double- and single-precision linear algebra providers. The documented routines are the singular value decomposition (GESVD equivalent), the eigenvalue/eigenvector decomposition, matrix norms, the BLAS-style vector operations DOT, AXPY and SCAL, matrix multiplication with and without update (GEMM), LUP factorization, inversion and solves (GETRF/GETRI/GETRS equivalents), Cholesky factorization and solves (POTRF/POTRS equivalents), full and thin QR factorization and solves (GEQRF/ORGQR equivalents, rows must be greater than or equal to columns), and SVD-based solves.]
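In practice these provider routines are usually reached through the high-level matrix and vector types rather than called directly. A minimal sketch, assuming the MathNet.Numerics 3.16 package the project references (the matrices and values below are made up for illustration):

```csharp
using MathNet.Numerics.LinearAlgebra;

// A small symmetric positive definite system, so both LU and Cholesky apply.
var A = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 4.0, 1.0 },
    { 1.0, 3.0 }
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

var xGeneral  = A.Solve(b);            // general solve (LU-based for square A)
var xLu       = A.LU().Solve(b);       // explicit LUP factorization, then solve
var xCholesky = A.Cholesky().Solve(b); // Cholesky factorization, then solve
```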
[The managed linear algebra provider: the same member documentation, generated once per supported element type. Besides the BLAS/LAPACK-equivalent routines above it documents array conjugation, the element-wise array operations add, subtract, multiply, divide and power (which have no direct BLAS equivalent but are commonly offered in optimized, parallel or vectorized form), a cache-oblivious matrix multiplication kernel, and internal helpers for the factorizations (per-column Cholesky steps, Householder column generation and Q/R accumulation for QR).]
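The vector-level routines (DOT, AXPY, SCAL) and the element-wise array operations surface as operators and methods on the high-level vector type. A small sketch, again with made-up values and assuming the MathNet.Numerics package:

```csharp
using MathNet.Numerics.LinearAlgebra;

var x = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
var y = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

double dot = x.DotProduct(y);          // DOT
var axpy   = y + 2.0 * x;              // y + alpha * x (AXPY-style)
var scaled = 0.5 * x;                  // SCAL-style scaling
var prod   = x.PointwiseMultiply(y);   // element-wise product
var quot   = x.PointwiseDivide(y);     // element-wise quotient
```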
[The managed-provider member documentation is then repeated for the remaining element types, with only the element type changing. The one addition is the Givens rotation routine (DROTG equivalent), which, given the Cartesian coordinates (da, db) of a point, returns the parameters r, z, c and s of the rotation that zeros the point's y-coordinate.]
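The QR- and SVD-based solvers documented above also handle overdetermined systems (rows greater than or equal to columns) in the least-squares sense. A hedged sketch through the high-level factorization objects, with invented data:

```csharp
using MathNet.Numerics.LinearAlgebra;

// Overdetermined 3x2 system, solved in the least-squares sense.
var A = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 1.0, 1.0 },
    { 1.0, 2.0 },
    { 1.0, 3.0 }
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 2.0 });

var xQr  = A.QR().Solve(b);   // QR-based least squares
var xSvd = A.Svd().Solve(b);  // SVD-based least squares (more robust when A is rank-deficient)
```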
[Shared provider plumbing and types: hooks to test whether a provider is available and to initialize it, falling back to the managed provider when it is not; the transpose options (don't transpose, transpose, conjugate transpose, where a conjugate transpose of a real matrix is a plain transpose); the matrix norm types (1-norm, Frobenius norm, infinity norm, largest-absolute-value norm); and the provider interface over 1-D arrays, supporting the Double, Single, Complex and Complex32 element types, whose vector, element-wise and norm members repeat the summaries above.]
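The norm and transpose options listed here also map onto high-level matrix members. A minimal sketch (made-up matrix, MathNet.Numerics assumed):

```csharp
using MathNet.Numerics.LinearAlgebra;

var m = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 1.0, -2.0 },
    { 3.0,  4.0 }
});

double oneNorm  = m.L1Norm();         // 1-norm: maximum absolute column sum
double infNorm  = m.InfinityNorm();   // infinity norm: maximum absolute row sum
double frobNorm = m.FrobeniusNorm();  // Frobenius norm

var mt = m.Transpose();               // plain transpose
// For complex element types, ConjugateTranspose() gives the Hermitian transpose;
// for real matrices it reduces to Transpose(), as the documentation notes.
```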
+ + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the full QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. 
+ The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by QR factor. This is only used for the managed provider and can be + null for the native provider. The native provider uses the Q portion stored in the R matrix. + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + On entry the B matrix; on exit the X matrix. + The number of columns of B. + On exit, the solution matrix. + Rows must be greater or equal to columns. + The type of QR factorization to perform. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Intel's Math Kernel Library (MKL) linear algebra provider. + + + Intel's Math Kernel Library (MKL) linear algebra provider. 
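Editor's note: the factorization and solve kernels documented above are normally reached through the higher-level Matrix/Vector types of MathNet.Numerics rather than called on raw arrays. A minimal illustrative sketch (not part of this diff) of solving A*x = b through the LU, QR and Cholesky paths, assuming the MathNet.Numerics 3.x package referenced elsewhere in this change:

    using MathNet.Numerics.LinearAlgebra;

    // Symmetric positive definite 2x2 system, so all three factorizations apply.
    var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
    var b = Vector<double>.Build.Dense(new double[] { 1, 2 });

    var xLu   = A.LU().Solve(b);        // LU path (GETRF/GETRS)
    var xQr   = A.QR().Solve(b);        // QR path (GEQRF/ORGQR), also valid for tall systems
    var xChol = A.Cholesky().Solve(b);  // Cholesky path (POTRF/POTRS), needs A positive definite

Whichever managed or native provider is active, these calls are routed to the corresponding routines described above.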
+ + + Intel's Math Kernel Library (MKL) linear algebra provider. + + + Intel's Math Kernel Library (MKL) linear algebra provider. + + + Intel's Math Kernel Library (MKL) linear algebra provider. + + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows in the matrix. + The number of columns in the matrix. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to Complex.One and beta set to Complex.Zero, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always Complex.One + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. 
+ This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
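Editor's note: the same vector kernels can also be reached at the raw-array level through the currently active provider. A rough sketch, assuming the provider exposed by Control.LinearAlgebraProvider keeps the DotProduct and MatrixMultiply signatures described in this documentation (the member names are an assumption here, since the cross-references were stripped from this file):

    using MathNet.Numerics;

    var provider = Control.LinearAlgebraProvider;

    // DOT-style kernel on plain arrays.
    double dot = provider.DotProduct(new[] { 1.0, 2.0, 3.0 }, new[] { 4.0, 5.0, 6.0 });

    // GEMM-style kernel: 2x2 identity times a 2x2 matrix, column-major storage,
    // result array preallocated by the caller as documented above.
    var x = new[] { 1.0, 0.0, 0.0, 1.0 };
    var y = new[] { 1.0, 2.0, 3.0, 4.0 };
    var result = new double[4];
    provider.MatrixMultiply(x, 2, 2, y, 2, 2, result);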
+ + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows in the matrix. + The number of columns in the matrix. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to Complex32.One and beta set to Complex32.Zero, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. 
+ + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always Complex32.One + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. 
+ Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. 
+ + + + Sets the desired bit consistency on repeated identical computations on varying CPU architectures, + as a trade-off with performance. + + VML optimal precision and rounding. + VML accuracy mode. + + + + Sets the desired bit consistency on repeated identical computations on varying CPU architectures, + as a trade-off with performance. + + VML optimal precision and rounding. + VML accuracy mode. + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. + If calling this method fails, consider to fall back to alternatives like the managed provider. + + + + + Frees the memory allocated to the MKL memory pool. + + + + + Frees the memory allocated to the MKL memory pool on the current thread. + + + + + Disable the MKL memory pool. May impact performance. + + + + + Retrieves information about the MKL memory pool. + + On output, returns the number of memory buffers allocated. + Returns the number of bytes allocated to all memory buffers. + + + + Enable gathering of peak memory statistics of the MKL memory pool. + + + + + Disable gathering of peak memory statistics of the MKL memory pool. + + + + + Measures peak memory usage of the MKL memory pool. + + Whether the usage counter should be reset. + The peak number of bytes allocated to all memory buffers. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows in the matrix. + The number of columns in the matrix. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . 
+ This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. 
+ The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows in the matrix. + The number of columns in the matrix. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. 
+ The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0f and beta set to 0.0f, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0f + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the QR factorization of A. 
+ + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. 
This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Error codes return from the MKL provider. + + + + + Unable to allocate memory. + + + + + Consistency vs. performance trade-off between runs on different machines. + + + + Consistent on the same CPU only (maximum performance) + + + Consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility) + + + Consistent on Intel CPUs supporting SSE2 or later + + + Consistent on Intel CPUs supporting SSE4.2 or later + + + Consistent on Intel CPUs supporting AVX or later + + + Consistent on Intel CPUs supporting AVX2 or later + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. 
+ Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. + + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). 
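Editor's note: the counting functions documented above are plain static helpers. A small illustrative sketch, assuming the Combinatorics class of MathNet.Numerics (the values in the comments follow directly from the standard formulas):

    using MathNet.Numerics;

    double hands    = Combinatorics.Combinations(52, 5);              // 2,598,960 unordered 5-card hands
    double ordered  = Combinatorics.Variations(10, 3);                // 720 ordered picks without repetition
    double multiset = Combinatorics.CombinationsWithRepetition(6, 3); // 56 multisets of size 3 from 6 items
    double perms    = Combinatorics.Permutations(5);                  // 120 orderings of 5 distinct items

The random selection helpers in the same class (random permutations, combinations and variations) take an optional random source and fall back to the default generator when it is null, as noted above.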
+ + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. + + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). + + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. 
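Editor's note: most of the Complex helpers documented here are extension methods on System.Numerics.Complex that become available by importing the MathNet.Numerics namespace. A brief sketch, assuming the extension method names match the summaries above (MagnitudeSquared, Conjugate, SquareRoot):

    using System.Numerics;
    using MathNet.Numerics;  // brings the Complex extension methods into scope

    var z = new Complex(3.0, 4.0);
    double m2 = z.MagnitudeSquared();  // 25.0
    Complex c = z.Conjugate();         // 3 - 4i
    Complex r = z.SquareRoot();        // principal square root: 2 + i, since (2 + i)^2 = 3 + 4i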
+ + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + The number to perfom this operation on. + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + The number to perfom this operation on. + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + The number to perfom this operation on. + + true if this instance is real nonnegative number; otherwise, false. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. 
This parameter is passed uninitialized + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + A collection of frequently used mathematical constants. + + + + The number e + + + The number log[2](e) + + + The number log[10](e) + + + The number log[e](2) + + + The number log[e](10) + + + The number log[e](pi) + + + The number log[e](2*pi)/2 + + + The number 1/e + + + The number sqrt(e) + + + The number sqrt(2) + + + The number sqrt(3) + + + The number sqrt(1/2) = 1/sqrt(2) = sqrt(2)/2 + + + The number sqrt(3)/2 + + + The number pi + + + The number pi*2 + + + The number pi/2 + + + The number pi*3/2 + + + The number pi/4 + + + The number sqrt(pi) + + + The number sqrt(2pi) + + + The number sqrt(2*pi*e) + + + The number log(sqrt(2*pi)) + + + The number log(sqrt(2*pi*e)) + + + The number log(2 * sqrt(e / pi)) + + + The number 1/pi + + + The number 2/pi + + + The number 1/sqrt(pi) + + + The number 1/sqrt(2pi) + + + The number 2/sqrt(pi) + + + The number 2 * sqrt(e / pi) + + + The number (pi)/180 - factor to convert from Degree (deg) to Radians (rad). + + + + + The number (pi)/200 - factor to convert from NewGrad (grad) to Radians (rad). + + + + + The number ln(10)/20 - factor to convert from Power Decibel (dB) to Neper (Np). Use this version when the Decibel represent a power gain but the compared values are not powers (e.g. amplitude, current, voltage). + + + The number ln(10)/10 - factor to convert from Neutral Decibel (dB) to Neper (Np). Use this version when either both or neither of the Decibel and the compared values represent powers. 
+ + + The Catalan constant + Sum(k=0 -> inf){ (-1)^k/(2*k + 1)2 } + + + The Euler-Mascheroni constant + lim(n -> inf){ Sum(k=1 -> n) { 1/k - log(n) } } + + + The number (1+sqrt(5))/2, also known as the golden ratio + + + The Glaisher constant + e^(1/12 - Zeta(-1)) + + + The Khinchin constant + prod(k=1 -> inf){1+1/(k*(k+2))^log(k,2)} + + + + The size of a double in bytes. + + + + + The size of an int in bytes. + + + + + The size of a float in bytes. + + + + + The size of a Complex in bytes. + + + + + The size of a Complex in bytes. + + + + Speed of Light in Vacuum: c_0 = 2.99792458e8 [m s^-1] (defined, exact; 2007 CODATA) + + + Magnetic Permeability in Vacuum: mu_0 = 4*Pi * 10^-7 [N A^-2 = kg m A^-2 s^-2] (defined, exact; 2007 CODATA) + + + Electric Permittivity in Vacuum: epsilon_0 = 1/(mu_0*c_0^2) [F m^-1 = A^2 s^4 kg^-1 m^-3] (defined, exact; 2007 CODATA) + + + Characteristic Impedance of Vacuum: Z_0 = mu_0*c_0 [Ohm = m^2 kg s^-3 A^-2] (defined, exact; 2007 CODATA) + + + Newtonian Constant of Gravitation: G = 6.67429e-11 [m^3 kg^-1 s^-2] (2007 CODATA) + + + Planck's constant: h = 6.62606896e-34 [J s = m^2 kg s^-1] (2007 CODATA) + + + Reduced Planck's constant: h_bar = h / (2*Pi) [J s = m^2 kg s^-1] (2007 CODATA) + + + Planck mass: m_p = (h_bar*c_0/G)^(1/2) [kg] (2007 CODATA) + + + Planck temperature: T_p = (h_bar*c_0^5/G)^(1/2)/k [K] (2007 CODATA) + + + Planck length: l_p = h_bar/(m_p*c_0) [m] (2007 CODATA) + + + Planck time: t_p = l_p/c_0 [s] (2007 CODATA) + + + Elementary Electron Charge: e = 1.602176487e-19 [C = A s] (2007 CODATA) + + + Magnetic Flux Quantum: theta_0 = h/(2*e) [Wb = m^2 kg s^-2 A^-1] (2007 CODATA) + + + Conductance Quantum: G_0 = 2*e^2/h [S = m^-2 kg^-1 s^3 A^2] (2007 CODATA) + + + Josephson Constant: K_J = 2*e/h [Hz V^-1] (2007 CODATA) + + + Von Klitzing Constant: R_K = h/e^2 [Ohm = m^2 kg s^-3 A^-2] (2007 CODATA) + + + Bohr Magneton: mu_B = e*h_bar/2*m_e [J T^-1] (2007 CODATA) + + + Nuclear Magneton: mu_N = e*h_bar/2*m_p [J T^-1] (2007 CODATA) + + + Fine Structure Constant: alpha = e^2/4*Pi*e_0*h_bar*c_0 [1] (2007 CODATA) + + + Rydberg Constant: R_infty = alpha^2*m_e*c_0/2*h [m^-1] (2007 CODATA) + + + Bor Radius: a_0 = alpha/4*Pi*R_infty [m] (2007 CODATA) + + + Hartree Energy: E_h = 2*R_infty*h*c_0 [J] (2007 CODATA) + + + Quantum of Circulation: h/2*m_e [m^2 s^-1] (2007 CODATA) + + + Fermi Coupling Constant: G_F/(h_bar*c_0)^3 [GeV^-2] (2007 CODATA) + + + Weak Mixin Angle: sin^2(theta_W) [1] (2007 CODATA) + + + Electron Mass: [kg] (2007 CODATA) + + + Electron Mass Energy Equivalent: [J] (2007 CODATA) + + + Electron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Electron Compton Wavelength: [m] (2007 CODATA) + + + Classical Electron Radius: [m] (2007 CODATA) + + + Tomson Cross Section: [m^2] (2002 CODATA) + + + Electron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Electon G-Factor: [1] (2007 CODATA) + + + Muon Mass: [kg] (2007 CODATA) + + + Muon Mass Energy Equivalent: [J] (2007 CODATA) + + + Muon Molar Mass: [kg mol^-1] (2007 CODATA) + + + Muon Compton Wavelength: [m] (2007 CODATA) + + + Muon Magnetic Moment: [J T^-1] (2007 CODATA) + + + Muon G-Factor: [1] (2007 CODATA) + + + Tau Mass: [kg] (2007 CODATA) + + + Tau Mass Energy Equivalent: [J] (2007 CODATA) + + + Tau Molar Mass: [kg mol^-1] (2007 CODATA) + + + Tau Compton Wavelength: [m] (2007 CODATA) + + + Proton Mass: [kg] (2007 CODATA) + + + Proton Mass Energy Equivalent: [J] (2007 CODATA) + + + Proton Molar Mass: [kg mol^-1] (2007 CODATA) + + + Proton Compton Wavelength: [m] (2007 CODATA) + + + Proton Magnetic 
Moment: [J T^-1] (2007 CODATA) + + + Proton G-Factor: [1] (2007 CODATA) + + + Proton Shielded Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Proton Shielded Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Neutron Mass: [kg] (2007 CODATA) + + + Neutron Mass Energy Equivalent: [J] (2007 CODATA) + + + Neutron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Neuron Compton Wavelength: [m] (2007 CODATA) + + + Neutron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Neutron G-Factor: [1] (2007 CODATA) + + + Neutron Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Deuteron Mass: [kg] (2007 CODATA) + + + Deuteron Mass Energy Equivalent: [J] (2007 CODATA) + + + Deuteron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Deuteron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Helion Mass: [kg] (2007 CODATA) + + + Helion Mass Energy Equivalent: [J] (2007 CODATA) + + + Helion Molar Mass: [kg mol^-1] (2007 CODATA) + + + Avogadro constant: [mol^-1] (2010 CODATA) + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 + + + The SI prefix factor corresponding to 1 000 + + + The SI prefix factor corresponding to 100 + + + The SI prefix factor corresponding to 10 + + + The SI prefix factor corresponding to 0.1 + + + The SI prefix factor corresponding to 0.01 + + + The SI prefix factor corresponding to 0.001 + + + The SI prefix factor corresponding to 0.000 001 + + + The SI prefix factor corresponding to 0.000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 000 001 + + + + Sets parameters for the library. + + + + + Use a specific provider if configured, e.g. using + environment variables, or fall back to the best providers. + + + + + Use the best provider available. + + + + + Use the Intel MKL native provider for linear algebra. + Throws if it is not available or failed to initialize, in which case the previous provider is still active. + + + + + Use the Intel MKL native provider for linear algebra, with the specified configuration parameters. + Throws if it is not available or failed to initialize, in which case the previous provider is still active. + + + + + Use the Intel MKL native provider for linear algebra, with the specified configuration parameters. + Throws if it is not available or failed to initialize, in which case the previous provider is still active. + + + + + Try to use the Intel MKL native provider for linear algebra. + + + True if the provider was found and initialized successfully. + False if it failed and the previous provider is still active. + + + + + Use the Nvidia CUDA native provider for linear algebra. + Throws if it is not available or failed to initialize, in which case the previous provider is still active. + + + + + Try to use the Nvidia CUDA native provider for linear algebra. 
+ + + True if the provider was found and initialized successfully. + False if it failed and the previous provider is still active. + + + + + Use the OpenBLAS native provider for linear algebra. + Throws if it is not available or failed to initialize, in which case the previous provider is still active. + + + + + Try to use the OpenBLAS native provider for linear algebra. + + + True if the provider was found and initialized successfully. + False if it failed and the previous provider is still active. + + + + + Try to use any available native provider in an undefined order. + + + True if one of the native providers was found and successfully initialized. + False if it failed and the previous provider is still active. + + + + + Gets or sets a value indicating whether the distribution classes check validate each parameter. + For the multivariate distributions this could involve an expensive matrix factorization. + The default setting of this property is true. + + + + + Gets or sets a value indicating whether to use thread safe random number generators (RNG). + Thread safe RNG about two and half time slower than non-thread safe RNG. + + + true to use thread safe random number generators ; otherwise, false. + + + + + Optional path to try to load native provider binaries from. + + + + + Gets or sets the linear algebra provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets the fourier transform provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets a value indicating how many parallel worker threads shall be used + when parallelization is applicable. + + Default to the number of processor cores, must be between 1 and 1024 (inclusive). + + + + Gets or sets the TaskScheduler used to schedule the worker tasks. + + + + + Gets or sets the the block size to use for + the native linear algebra provider. + + The block size. Default 512, must be at least 32. + + + + Gets or sets the order of the matrix when linear algebra provider + must calculate multiply in parallel threads. + + The order. Default 64, must be at least 3. + + + + Gets or sets the number of elements a vector or matrix + must contain before we multiply threads. + + Number of elements. Default 300, must be at least 3. + + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. 
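A hedged sketch of selecting a provider and tuning the Control settings documented above, assuming the TryUseNativeMKL/UseManaged entry points listed here; the assigned values are illustrative, not recommendations from this changeset.

using System;
using MathNet.Numerics;

class ControlDemo
{
    static void Main()
    {
        // Prefer the MKL native provider when its binaries are present,
        // otherwise stay on the managed provider (the TryUse* variants never throw).
        if (!Control.TryUseNativeMKL())
        {
            Control.UseManaged();
        }

        // Settings described in the summaries above (illustrative values).
        Control.MaxDegreeOfParallelism = Environment.ProcessorCount;
        Control.CheckDistributionParameters = true;
        Control.ThreadSafeRandomNumberGenerators = false;

        Console.WriteLine(Control.LinearAlgebraProvider);
    }
}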
+ + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. 
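A short sketch exercising the Complex32 members documented above. Note that the inline example in the class summary uses Math.Pi, which in C# is Math.PI (a double) and would need a float argument to compile; the sketch below sticks to members whose signatures are listed here, with illustrative values.

using System;
using MathNet.Numerics;

class Complex32MembersDemo
{
    static void Main()
    {
        var z = new Complex32(3f, 4f);

        Console.WriteLine(z.Magnitude);          // 5
        Console.WriteLine(z.MagnitudeSquared);   // 25
        Console.WriteLine(z.Phase);              // atan2(4, 3) in radians
        Console.WriteLine(z.Conjugate());        // (3, -4)
        Console.WriteLine(z.SquareRoot());       // principal square root, (2, 1)
        Console.WriteLine(Complex32.ImaginaryOne.IsImaginaryOne);  // True
    }
}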
+ + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. + + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. 
+ + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. 
+ The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a BigInteger int to a Complex32. + + The BigInteger value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. 
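A sketch of the implicit conversions and static helpers listed above (Sqrt, Exp, Pow, Reciprocal, ToComplex). The specific values and the Pow overload taking a float exponent are assumed from the summaries here rather than verified against the binary.

using System;
using MathNet.Numerics;

class Complex32StaticDemo
{
    static void Main()
    {
        Complex32 a = 2.0f;                          // implicit conversion from float
        var b = new Complex32(0f, 1f);               // the imaginary unit

        Console.WriteLine(Complex32.Sqrt(a));        // about (1.414, 0)
        Console.WriteLine(Complex32.Exp(b));         // e^i = (cos 1, sin 1)
        Console.WriteLine(Complex32.Pow(b, 2f));     // i^2 = (-1, 0)
        Console.WriteLine(Complex32.Reciprocal(b));  // 1/i = (0, -1)

        System.Numerics.Complex widened = a.ToComplex();  // widen to double precision
        Console.WriteLine(widened);
    }
}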
+ + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Metrics to measure the distance between two structures. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. 
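A minimal sketch of the Distance metrics above on two small double arrays; the expected values in the comments follow directly from the definitions (L1-, L2-, infinity- and p-norms of the difference), and the arrays are illustrative.

using System;
using MathNet.Numerics;

class DistanceDemo
{
    static void Main()
    {
        double[] a = { 1.0, 2.0, 3.0 };
        double[] b = { 2.0, 4.0, 6.0 };

        Console.WriteLine(Distance.SAD(a, b));           // 1 + 2 + 3 = 6
        Console.WriteLine(Distance.Euclidean(a, b));     // sqrt(1 + 4 + 9)
        Console.WriteLine(Distance.Manhattan(a, b));     // 6
        Console.WriteLine(Distance.Chebyshev(a, b));     // 3
        Console.WriteLine(Distance.Minkowski(3.0, a, b));
        Console.WriteLine(Distance.Cosine(a, b));        // 0, the vectors are parallel
    }
}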
+ + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Pearson's distance, i.e. 1 - the person correlation coefficient. + + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. 
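A sketch of the least-squares routines above, assuming Fit.Line returns the pair (intercept, slope) as documented and that Fit.Polynomial coefficients are compatible with Evaluate.Polynomial; the sample data is synthetic noise around y = 1 + 2x.

using System;
using MathNet.Numerics;

class FitDemo
{
    static void Main()
    {
        double[] x = { 0, 1, 2, 3, 4 };
        double[] y = { 1.1, 2.9, 5.2, 6.9, 9.1 };

        var line = Fit.Line(x, y);                   // (intercept, slope)
        Console.WriteLine(line.Item1 + " " + line.Item2);

        double[] p = Fit.Polynomial(x, y, 2);        // p0 + p1*x + p2*x^2
        Console.WriteLine(Evaluate.Polynomial(2.5, p));

        Func<double, double> f = Fit.LineFunc(x, y);
        Console.WriteLine(f(10.0));                  // predict at x = 10
    }
}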
+ + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return 0) + and then dividing the total by the number of gain periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. 
(The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). + + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occured calling native provider function. + + + + + An error occured calling native provider function. + + + + + Native provider was unable to allocate sufficent memory. + + + + + Native provider failed LU inversion do to a singular U matrix. + + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + + + + Construct a new random number generator with random seed. + + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The seed value. + + + + Construct a new random number generator with random seed. + + The seed value. + if set to true , the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fill an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. 
+ + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Provides a time-dependent seed value, matching the default behavior of System.Random. + WARNING: There is no randomness in this seed and quick repeated calls can cause + the same seed value. Do not use for cryptography! + + + + + Provides a seed based on time and unique GUIDs. + WARNING: There is only low randomness in this seed, but at least quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Provides a seed based on an internal random number generator (crypto if available), time and unique GUIDs. + WARNING: There is only medium randomness in this seed, but quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Base class for random number generators. This class introduces a layer between + and the Math.Net Numerics random number generators to provide thread safety. + When used directly it use the System.Random as random number source. + + + + + Initializes a new instance of the class using + the value of to set whether + the instance is thread safe or not. + + + + + Initializes a new instance of the class. + + if set to true , the class is thread safe. + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The array to fill with random values. + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The size of the array to fill. + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than . + + + + + Returns a random number less then a specified maximum. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + A 32-bit signed integer less than . + is zero or negative. + + + + Returns a random number within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + A 32-bit signed integer greater than or equal to and less than ; that is, the range of return values includes but not . If equals , is returned. + + is greater than . + + + + Fills an array with random 32-bit signed integers greater than or equal to zero and less than . + + The array to fill with random values. + + + + Returns an array with random 32-bit signed integers greater than or equal to zero and less than . + + The size of the array to fill. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. 
+ The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an infinite sequence of random 32-bit signed integers greater than or equal to zero and less than . + + + + + Returns an infinite sequence of random numbers within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Fills the elements of a specified array of bytes with random numbers. + + An array of bytes to contain random numbers. + is null. + + + + Returns a random number between 0.0 and 1.0. + + A double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than 2147483647 (). + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random N-bit signed integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 32 (not verified). + + + + + Returns a random N-bit signed long integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 64 (not verified). + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. 
+ + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Random number generator using Mersenne Twister 19937 algorithm. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + Uses the value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A 32-bit combined multiple recursive generator with 2 components of order 3. + + Based off of P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research, 44, 5 (1996), 816--822. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. 
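A short sketch of the random-source API documented above, using MersenneTwister as a representative generator; the seed and array sizes are illustrative, and the same calls apply to the other generators in this section.

using System;
using System.Linq;
using MathNet.Numerics.Random;

class RandomSourceDemo
{
    static void Main()
    {
        // Deterministic, reproducible stream from a fixed seed.
        var mt = new MersenneTwister(42);
        Console.WriteLine(mt.NextDouble());       // in [0, 1)
        Console.WriteLine(mt.Next(1, 7));         // die roll: 1..6

        // Bulk generation; large arrays are filled in parallel internally.
        double[] samples = mt.NextDoubles(1000);
        Console.WriteLine(samples.Average());

        // Shared thread-safe instance when reproducibility is not needed.
        Console.WriteLine(MersenneTwister.Default.NextDouble());
    }
}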
+ + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Represents a Parallel Additive Lagged Fibonacci pseudo-random number generator. + + + The type bases upon the implementation in the + Boost Random Number Library. + It uses the modulus 232 and by default the "lags" 418 and 1279. Some popular pairs are presented on + Wikipedia - Lagged Fibonacci generator. + + + + + Default value for the ShortLag + + + + + Default value for the LongLag + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The ShortLag value + TheLongLag value + + + + Gets the short lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Gets the long lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Stores an array of random numbers + + + + + Stores an index for the random number array element that will be accessed next. + + + + + Fills the array with new unsigned random numbers. + + + Generated random numbers are 32-bit unsigned integers greater than or equal to 0 + and less than or equal to . + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. 
+ + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + Uses and uses the value of + to set whether the instance is thread safe. + + + + Construct a new random number generator with random seed. + + The to use. + Uses the value of to set whether the instance is thread safe. + + + + Construct a new random number generator with random seed. + + Uses + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The to use. + if set to true , the class is thread safe. + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + This class implements extension methods for the System.Random class. The extension methods generate + pseudo-random distributed numbers for types other than double and int32. + + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random bytes. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers greater than or equal to zero and less than . + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers within the specified range. + + The random number generator. + The array to fill with random values. + Lower bound, inclusive. + Upper bound, exclusive. 
+ + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative random number less than . + + The random number generator. + + A 64-bit signed integer greater than or equal to 0, and less than ; that is, + the range of return values includes 0 but not . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int32 range. + + The random number generator. + + A 32-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int64 range. + + The random number generator. + + A 64-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative decimal floating point random number less than 1.0. + + The random number generator. + + A decimal floating point number greater than or equal to 0.0, and less than 1.0; that is, + the range of return values includes 0.0 but not 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random boolean. + + The random number generator. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Wichmann-Hill’s 1982 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: + An efficient and portable pseudo-random number generator". Applied Statistics 31 (1982) 188-190 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. 
+ + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 2006 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers". + Computational Statistics & Data Analysis 51:3 (2006) 1614-1622 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Implements a multiply-with-carry Xorshift pseudo random number generator (RNG) specified in Marsaglia, George. (2003). Xorshift RNGs. + Xn = a * Xn−3 + c mod 2^32 + http://www.jstatsoft.org/v08/i14/paper + + + + + The default value for X1. + + + + + The default value for X2. + + + + + The default value for the multiplier. + + + + + The default value for the carry over. + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Seed or last but three unsigned random number. + + + + + Last but two unsigned random number. + + + + + Last but one unsigned random number. + + + + + The value of the carry over. + + + + + The multiplier. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Note: must be less than . + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. 
Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Algorithm by Broyden. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Helper method to calculate an approximation of the Jacobian. + + The function. + The argument (initial guess). + The result (of initial guess). + + + + Finds roots to the cubic equation x^3 + a2*x^2 + a1*x + a0 = 0 + Implements the cubic formula in http://mathworld.wolfram.com/CubicFormula.html + + + + + Q and R are transformed variables. + + + + + n^(1/3) - work around a negative double raised to (1/3) + + + + + Find all real-valued roots of the cubic equation a0 + a1*x + a2*x^2 + x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). 
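As an illustration of the Broyden solver documented above, here is a minimal sketch that solves a small nonlinear system. It assumes the MathNet.Numerics.RootFinding.Broyden.FindRoot overload taking a vector function, an initial guess, an accuracy and an iteration limit, as described in the summaries:

    using System;
    using MathNet.Numerics.RootFinding;

    static class BroydenSketch
    {
        static void Main()
        {
            // Solve x^2 + y^2 = 4 and x*y = 1 simultaneously.
            Func<double[], double[]> f = v => new[]
            {
                v[0] * v[0] + v[1] * v[1] - 4.0,
                v[0] * v[1] - 1.0
            };

            double[] root = Broyden.FindRoot(f, new[] { 2.0, 0.5 }, 1e-10, 100);
            Console.WriteLine($"x = {root[0]:G6}, y = {root[1]:G6}");
        }
    }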
+ + + + + Pure Newton-Raphson root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Robust Newton-Raphson root-finding algorithm that falls back to bisection when overshooting or converging too slow, or to subdivision on lacking bracketing. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Default 20. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Example: 20. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. 
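A hedged usage sketch for the two Newton-Raphson variants described above, assuming FindRoot overloads with the parameter order given in the summaries (function, derivative, bounds, accuracy, iteration limit, plus a subdivision count for the robust variant):

    using System;
    using MathNet.Numerics.RootFinding;

    static class NewtonSketch
    {
        static void Main()
        {
            Func<double, double> f  = x => x * x * x - 2.0;   // root at 2^(1/3) ~ 1.2599
            Func<double, double> df = x => 3.0 * x * x;

            // Pure Newton-Raphson: aborts if an iterate leaves [1, 2].
            double pure = NewtonRaphson.FindRoot(f, df, 1.0, 2.0, 1e-12, 100);

            // Robust variant: falls back to bisection/subdivision when Newton misbehaves.
            double robust = RobustNewtonRaphson.FindRoot(f, df, 1.0, 2.0, 1e-12, 100, 20);

            Console.WriteLine($"{pure:R} {robust:R}");
        }
    }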
+ + + + Pure Secant root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false + + + Detect a range containing at least one root. + The function to detect roots from. + Lower value of the range. + Upper value of the range + The growing factor of research. Usually 1.6. + Maximum number of iterations. Usually 50. + True if the bracketing operation succeeded, false otherwise. + This iterative methods stops when two values with opposite signs are found. + + + + Algorithm by by Brent, Van Wijngaarden, Dekker et al. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. 
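For the Brent solver documented here, a minimal sketch assuming the plain Brent.FindRoot(f, lower, upper, accuracy, maxIterations) overload and its TryFindRoot counterpart from MathNet.Numerics.RootFinding:

    using System;
    using MathNet.Numerics.RootFinding;

    static class BrentSketch
    {
        static void Main()
        {
            Func<double, double> f = x => Math.Cos(x) - x;   // single root near 0.739

            double root = Brent.FindRoot(f, 0.0, 1.0, 1e-10, 100);

            // Non-throwing variant: reports failure via the return value instead of an exception.
            double r;
            bool ok = Brent.TryFindRoot(f, 0.0, 1.0, 1e-10, 100, out r);

            Console.WriteLine($"{root:R} (ok: {ok})");
        }
    }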
+ The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Helper method useful for preventing rounding errors. + a*sign(b) + + + + Bisection root-finding algorithm. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy for both the root and the function value at the root. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. 
+ The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = Σ'_{i=0..N-1} coef[i] T_i(x/2)
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must + have been transformed to x -> 2(2x - b - a)/(b-a) before + entering the routine. This maps x from (a, b) to (-1, 1), + over which the Chebyshev polynomials are defined. +

+ If the coefficients are for the inverted interval, in + which (a, b) is mapped to (1/b, 1/a), the transformation + required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + this becomes x -> 4a/x - 1. +

+ SPEED: +

+ Taking advantage of the recurrence properties of the + Chebyshev polynomials, the routine requires one more + addition per loop than evaluating a nested polynomial of + the same degree. +

+ The coefficients of the polynomial. + Argument to the polynomial. + + Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs +

+ Marked as Deprecated in + http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html + + + +

+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification. + + The no. of terms in the sequence. + The coefficients of the Chebyshev series, length n+1. + The value at which the series is to be evaluated. + + ORIGINAL AUTHOR: + Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics, University of Paisley; High St., PAISLEY, SCOTLAND + REFERENCES: + "An error analysis of the modified Clenshaw method for evaluating Chebyshev and Fourier series" + J. Oliver, J.I.M.A., vol. 20, 1977, pp379-391 + +
+ + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend to use them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the logistic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the harmonic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the error function. + + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). 
+ + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of first kind, + order 1 of the argument. +

+ The function is defined as i1(x) = -i j1( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of the second kind + of order 0 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 0 of the argument. + + The value to compute the bessel function of. + + + + Returns the modified Bessel function of the second kind + of order 1 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 1 of the argument. +

+ k1e(x) = exp(x) * k1(x). +

+ The value to compute the bessel function of. + +
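A hedged sketch of calling the modified Bessel functions described in this block. The static method names on MathNet.Numerics.SpecialFunctions (BesselI0, BesselI1, BesselK0, BesselK1, BesselK1e) are assumptions based on the summaries above:

    using System;
    using MathNet.Numerics;

    static class BesselSketch
    {
        static void Main()
        {
            double x = 2.5;

            double i0 = SpecialFunctions.BesselI0(x);
            double i1 = SpecialFunctions.BesselI1(x);
            double k0 = SpecialFunctions.BesselK0(x);
            double k1 = SpecialFunctions.BesselK1(x);

            // Exponentially scaled variant, e.g. k1e(x) = exp(x) * k1(x).
            double k1e = SpecialFunctions.BesselK1e(x);

            Console.WriteLine($"{i0} {i1} {k0} {k1} {k1e}");
        }
    }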
+ + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logarithm of the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The logarithm of the Euler Beta function evaluated at z,w. + If or are not positive. + + + + Computes the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The Euler Beta function evaluated at z,w. + If or are not positive. + + + + Returns the lower incomplete (unregularized) beta function + B(a,b,x) = int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The lower incomplete (unregularized) beta function. + + + + Returns the regularized lower incomplete beta function + I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The regularized lower incomplete beta function. + + + + ************************************** + COEFFICIENTS FOR METHOD ErfImp * + ************************************** + + Polynomial coefficients for a numerator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for adenominator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. 
+ + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + + ************************************** + COEFFICIENTS FOR METHOD ErfInvImp * + ************************************** + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. 
+ + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. + returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the factorial of an integer. + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. 
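A brief sketch pulling together several of the special functions described above (the error function and its inverse, the logistic/logit pair, and the combinatorial helpers). The method names Erf, ErfInv, Logistic, Logit, Factorial and Binomial on MathNet.Numerics.SpecialFunctions are assumed from the summaries:

    using System;
    using MathNet.Numerics;

    static class SpecialFunctionsSketch
    {
        static void Main()
        {
            double p = SpecialFunctions.Erf(1.0);        // ~0.8427
            double z = SpecialFunctions.ErfInv(p);       // ~1.0 (round trip)

            double s = SpecialFunctions.Logistic(0.0);   // 0.5
            double l = SpecialFunctions.Logit(0.5);      // 0.0

            double f = SpecialFunctions.Factorial(5);    // 120
            double c = SpecialFunctions.Binomial(10, 3); // 120

            Console.WriteLine($"{p} {z} {s} {l} {f} {c}");
        }
    }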
+ + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. 
(a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. 
+ + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). 
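As a usage sketch for the unsorted-array statistics above (class name assumed to be MathNet.Numerics.Statistics.ArrayStatistics), noting that the Inplace-suffixed methods may reorder the array they are given:

    using System;
    using MathNet.Numerics.Statistics;

    static class ArrayStatisticsSketch
    {
        static void Main()
        {
            double[] data = { 4.0, 1.0, 3.0, 2.0, 5.0 };

            double mean = ArrayStatistics.Mean(data);
            double sVar = ArrayStatistics.Variance(data);            // N-1 normalizer
            double pVar = ArrayStatistics.PopulationVariance(data);  // N normalizer

            // WARNING: Inplace methods may reorder 'data'; work on a copy if order matters.
            double[] copy = (double[])data.Clone();
            double secondSmallest = ArrayStatistics.OrderStatisticInplace(copy, 2);

            Console.WriteLine($"{mean} {sVar} {pVar} {secondSmallest}");
        }
    }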
+ + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. 
+ The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. 
+ + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. 
+ + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. 
+ + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. 
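
The windowed running statistics described above correspond to the MovingStatistics type in MathNet.Numerics 3.x; treat the exact class name and the window size below as assumptions made for illustration. The point of the sketch is that only the most recent observations influence the reported values.

    using System;
    using MathNet.Numerics.Statistics;

    class MovingWindowDemo
    {
        static void Main()
        {
            var window = new MovingStatistics(5);  // statistics over the last 5 samples only

            double[] stream = { 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0 };
            foreach (var x in stream)
            {
                window.Push(x);
                Console.WriteLine($"n={window.Count} mean={window.Mean:F2} sd={window.StandardDeviation:F2}");
            }
        }
    }
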
+ + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). 
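
The running-statistics accumulator documented above can be updated value by value and merged with another accumulator; a minimal sketch under the assumption that this is the RunningStatistics class of MathNet.Numerics 3.x (the data values are arbitrary). Combining two accumulators gives the same moments as pushing all samples into one, which is what makes the type useful for partitioned or parallel processing.

    using System;
    using MathNet.Numerics.Statistics;

    class RunningStatsDemo
    {
        static void Main()
        {
            // Accumulate two partitions of a dataset independently...
            var left = new RunningStatistics(new[] { 1.0, 2.0, 3.0 });
            var right = new RunningStatistics();
            right.PushRange(new[] { 4.0, 5.0, 6.0 });

            // ...then merge them without revisiting the raw data.
            var combined = RunningStatistics.Combine(left, right);

            Console.WriteLine($"n={combined.Count} mean={combined.Mean:F3}");
            Console.WriteLine($"sample sd={combined.StandardDeviation:F3}");               // N-1 normalizer
            Console.WriteLine($"population sd={combined.PopulationStandardDeviation:F3}"); // N normalizer
        }
    }
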
+ Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
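
A short sketch of the sorted-array statistics documented in these entries, assuming the SortedArrayStatistics class of MathNet.Numerics 3.x; the data values are arbitrary. Because these functions assume ascending order and never reorder the input, the array is sorted once and then reused for every query.

    using System;
    using MathNet.Numerics.Statistics;

    class SortedStatsDemo
    {
        static void Main()
        {
            double[] data = { 4.2, 1.1, 3.3, 2.7, 5.9, 0.4 };
            Array.Sort(data);  // ascending order is a precondition, not enforced by the library

            double median = SortedArrayStatistics.Median(data);
            double q1 = SortedArrayStatistics.LowerQuartile(data);
            double q3 = SortedArrayStatistics.UpperQuartile(data);
            double iqr = SortedArrayStatistics.InterquartileRange(data);
            double tau75 = SortedArrayStatistics.Quantile(data, 0.75);  // R8 definition

            Console.WriteLine($"median={median:F2} Q1={q1:F2} Q3={q3:F2} IQR={iqr:F2} q(0.75)={tau75:F2}");
        }
    }
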
+ + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Statistics operating on an IEnumerable in a single pass, without keeping the full data in memory. + Can be used in a streaming way, e.g. 
on large datasets not fitting into memory. + + + + + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. 
+ + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. 
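
A hedged sketch of the single-pass streaming statistics documented here, assuming the StreamingStatistics class of MathNet.Numerics 3.x; the generated sequence is an invented example. Each call below enumerates the sequence once without buffering it, which is the property that makes these functions usable on data that does not fit in memory.

    using System;
    using System.Linq;
    using MathNet.Numerics.Statistics;

    class StreamingStatsDemo
    {
        static void Main()
        {
            // A lazily generated sequence; nothing is materialized up front.
            var stream = Enumerable.Range(1, 1000000).Select(i => Math.Sin(i) + i * 1e-6);

            double mean = StreamingStatistics.Mean(stream);             // one pass
            double sd = StreamingStatistics.StandardDeviation(stream);  // one pass, N-1 normalizer
            double min = StreamingStatistics.Minimum(stream);           // one pass

            Console.WriteLine($"mean={mean:F6} sd={sd:F6} min={min:F6}");
        }
    }
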
+ + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Calculates the entropy of a stream of double values. + Returns NaN if any of the values in the stream are NaN. + + The input stream to evaluate. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. 
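
A brief sketch of the correlation measures documented in these entries, assuming the Correlation class of MathNet.Numerics 3.x; the two sample series are invented. Pearson measures linear association, while Spearman works on ranks and therefore also captures monotone but non-linear relationships.

    using System;
    using MathNet.Numerics.Statistics;

    class CorrelationDemo
    {
        static void Main()
        {
            double[] a = { 1.0, 2.0, 3.0, 4.0, 5.0 };
            double[] b = { 2.1, 3.9, 6.2, 8.1, 9.8 };

            double pearson = Correlation.Pearson(a, b);    // linear correlation
            double spearman = Correlation.Spearman(a, b);  // rank correlation

            Console.WriteLine($"Pearson={pearson:F4} Spearman={spearman:F4}");
        }
    }
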
+ + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. + + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. 
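
A minimal sketch of the descriptive-statistics class documented above, assuming the DescriptiveStatistics type of MathNet.Numerics 3.x with arbitrary sample data. One constructor call yields count, mean, variance, standard deviation, skewness, kurtosis, minimum and maximum from a single object.

    using System;
    using MathNet.Numerics.Statistics;

    class DescriptiveDemo
    {
        static void Main()
        {
            double[] samples = { 2.3, 4.1, 3.3, 5.7, 4.9, 3.8, 6.2 };

            var stats = new DescriptiveStatistics(samples);

            Console.WriteLine($"n={stats.Count}");
            Console.WriteLine($"mean={stats.Mean:F3} sd={stats.StandardDeviation:F3}");
            Console.WriteLine($"skew={stats.Skewness:F3} kurt={stats.Kurtosis:F3}");
            Console.WriteLine($"min={stats.Minimum:F3} max={stats.Maximum:F3}");
        }
    }
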
+ It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. + + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. 
+ + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. 
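
A hedged sketch of the Histogram and Bucket API documented in the preceding entries, assuming the Histogram class of MathNet.Numerics 3.x; the bucket count and data are arbitrary. The two-argument constructor spreads equally sized buckets between the smallest and largest data point, and the indexer returns a copy of the requested bucket.

    using System;
    using MathNet.Numerics.Statistics;

    class HistogramDemo
    {
        static void Main()
        {
            var rng = new Random(42);
            var data = new double[1000];
            for (int i = 0; i < data.Length; i++) data[i] = rng.NextDouble() * 10.0;

            var histogram = new Histogram(data, 5);  // 5 equally sized buckets over min..max

            for (int i = 0; i < histogram.BucketCount; i++)
            {
                var bucket = histogram[i];  // copy of the i-th bucket
                Console.WriteLine($"({bucket.LowerBound:F2}, {bucket.UpperBound:F2}]: {bucket.Count}");
            }
        }
    }
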
+ When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. + + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. 
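
The "simple three point estimation" mentioned above for the default gradient is a central-difference scheme. The stand-alone sketch below only illustrates that idea and is not the library's internal implementation; the step size h is a hypothetical choice.

    using System;

    static class ThreePointGradient
    {
        // Central-difference ("three point") gradient estimate of f at x:
        // df/dx_i is approximated by (f(x + h*e_i) - f(x - h*e_i)) / (2h).
        public static double[] Estimate(Func<double[], double> f, double[] x, double h = 1e-5)
        {
            var grad = new double[x.Length];
            for (int i = 0; i < x.Length; i++)
            {
                double original = x[i];
                x[i] = original + h;
                double fPlus = f(x);
                x[i] = original - h;
                double fMinus = f(x);
                x[i] = original;  // restore the evaluation point
                grad[i] = (fPlus - fMinus) / (2.0 * h);
            }
            return grad;
        }
    }
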
+ + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. + The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. 
+ + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. 
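
A hedged sketch of the Metropolis sampler documented above, following the constructor order given in these entries (initial sample, log density, symmetric proposal, burn interval). The target density, proposal step size, burn interval and the use of the acceptance-rate property are illustrative assumptions, not a prescribed configuration.

    using System;
    using MathNet.Numerics.Distributions;
    using MathNet.Numerics.Statistics;
    using MathNet.Numerics.Statistics.Mcmc;

    class MetropolisDemo
    {
        static void Main()
        {
            // Target: standard normal, given through its log density (up to an additive constant).
            // Proposal: a symmetric random-walk step around the current state.
            var step = new Normal(0.0, 0.5);
            var sampler = new MetropolisSampler<double>(
                0.0,                     // initial sample
                x => -0.5 * x * x,       // log density of the target
                x => x + step.Sample(),  // symmetric proposal
                20);                     // burn interval between returned samples

            double[] samples = sampler.Sample(5000);
            Console.WriteLine($"mean={samples.Mean():F3} sd={samples.StandardDeviation():F3}");
            Console.WriteLine($"acceptance rate={sampler.AcceptanceRate:F3}");
        }
    }
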
+ + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. 
+ The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. + The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. 
+ The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + The full population data. + + + + Evaluates the skewness from the full population. 
+ Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + The full population data. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the unbiased population skewness and kurtosis from the provided samples in a single pass. + Uses a normalizer (Bessel's correction; type 2). + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness and kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + + The full population data. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. 
+ A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + The full population data. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. 
+ The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). 
+ + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. 
+ Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + Null-entries are ignored. + + The data sample sequence. + + + + Evaluates the sample mean over a moving window, for each samples. + Returns NaN if no data is empty or if any entry is NaN. + + The sample stream to calculate the mean of. + The number of last samples to consider. + + + + Class to represent a permutation for a subset of the natural numbers. + + + + + Entry _indices[i] represents the location to which i is permuted to. + + + + + Initializes a new instance of the Permutation class. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + + + + Gets the number of elements this permutation is over. + + + + + Computes where permutes too. + + The index to permute from. + The index which is permuted to. + + + + Computes the inverse of the permutation. + + The inverse of the permutation. + + + + Construct an array from a sequence of inversions. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + The set of inversions to construct the permutation from. + A permutation generated from a sequence of inversions. + + + + Construct a sequence of inversions from the permutation. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. 
+ + A sequence of inversions. + + + + Checks whether the array represents a proper permutation. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + True if represents a proper permutation, false otherwise. + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + Culture Info. + The parsed double number using the given culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + Culture Info. + The parsed float number using the given culture information. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. 
+ Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. + Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). 
+ + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Support Interface for Precision Operations (like AlmostEquals). + + Type of the implementing class. + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + A norm of this value. + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + The value to compare with. + A norm of the difference between this and the other value. + + + + Sorting algorithms for single, tuple and triple lists. + + + + + Sort a list of keys, in place using the quick sort algorithm using the quick sort algorithm. + + The type of elements in the key list. + List to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + Comparison, defining the sort order. 
+ + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a range of a list of keys, in place using the quick sort algorithm. + + The type of element in the list. + List to sort. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the primary list. + The type of elements in the secondary list. + List to sort. + List to sort on duplicate primary items, and permute the same way as the key list. + Comparison, defining the primary sort order. + Comparison, defining the secondary sort order. + + + + Recursive implementation for an in place quick sort on a list. + + The type of the list on which the quick sort is performed. + The list which is sorted using quick sort. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on a list while reordering one other list accordingly. + + The type of the list on which the quick sort is performed. + The type of the list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on one list while reordering two other lists accordingly. + + The type of the list on which the quick sort is performed. + The type of the first list which is automatically reordered accordingly. + The type of the second list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The first list which is automatically reordered accordingly. + The second list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. 
+ + + + Recursive implementation for an in place quick sort on the primary and then by the secondary list while reordering one secondary list accordingly. + + The type of the primary list. + The type of the secondary list. + The list which is sorted using quick sort. + The list which is sorted secondarily (on primary duplicates) and automatically reordered accordingly. + The method with which to compare two elements of the primary list. + The method with which to compare two elements of the secondary list. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Performs an in place swap of two elements in a list. + + The type of elements stored in the list. + The list in which the elements are stored. + The index of the first element of the swap. + The index of the second element of the swap. + + + + Used to simplify parallel code, particularly between the .NET 4.0 and Silverlight Code. + + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The body to be invoked for each iteration range. + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The partition size for splitting work into smaller pieces. + The body to be invoked for each iteration range. + + + + Executes each of the provided actions inside a discrete, asynchronous task. + + An array of actions to execute. + The actions array contains a null element. + At least one invocation of the actions threw an exception. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Double-precision trigonometry toolkit. + + + + + Constant to convert a degree to grad. + + + + + Converts a degree (360-periodic) angle to a grad (400-periodic) angle. + + The degree to convert. + The converted grad angle. + + + + Converts a degree (360-periodic) angle to a radian (2*Pi-periodic) angle. + + The degree to convert. + The converted radian angle. + + + + Converts a grad (400-periodic) angle to a degree (360-periodic) angle. + + The grad to convert. + The converted degree. + + + + Converts a grad (400-periodic) angle to a radian (2*Pi-periodic) angle. + + The grad to convert. + The converted radian. + + + + Converts a radian (2*Pi-periodic) angle to a degree (360-periodic) angle. + + The radian to convert. + The converted degree. + + + + Converts a radian (2*Pi-periodic) angle to a grad (400-periodic) angle. + + The radian to convert. + The converted grad. + + + + Normalized Sinc function. sinc(x) = sin(pi*x)/(pi*x). 
+ + + + + Trigonometric Sine of an angle in radian, or opposite / hypotenuse. + + The angle in radian. + The sine of the radian angle. + + + + Trigonometric Sine of a Complex number. + + The complex value. + The sine of the complex number. + + + + Trigonometric Cosine of an angle in radian, or adjacent / hypotenuse. + + The angle in radian. + The cosine of an angle in radian. + + + + Trigonometric Cosine of a Complex number. + + The complex value. + The cosine of a complex number. + + + + Trigonometric Tangent of an angle in radian, or opposite / adjacent. + + The angle in radian. + The tangent of the radian angle. + + + + Trigonometric Tangent of a Complex number. + + The complex value. + The tangent of the complex number. + + + + Trigonometric Cotangent of an angle in radian, or adjacent / opposite. Reciprocal of the tangent. + + The angle in radian. + The cotangent of an angle in radian. + + + + Trigonometric Cotangent of a Complex number. + + The complex value. + The cotangent of the complex number. + + + + Trigonometric Secant of an angle in radian, or hypotenuse / adjacent. Reciprocal of the cosine. + + The angle in radian. + The secant of the radian angle. + + + + Trigonometric Secant of a Complex number. + + The complex value. + The secant of the complex number. + + + + Trigonometric Cosecant of an angle in radian, or hypotenuse / opposite. Reciprocal of the sine. + + The angle in radian. + Cosecant of an angle in radian. + + + + Trigonometric Cosecant of a Complex number. + + The complex value. + The cosecant of a complex number. + + + + Trigonometric principal Arc Sine in radian + + The opposite for a unit hypotenuse (i.e. opposite / hyptenuse). + The angle in radian. + + + + Trigonometric principal Arc Sine of this Complex number. + + The complex value. + The arc sine of a complex number. + + + + Trigonometric principal Arc Cosine in radian + + The adjacent for a unit hypotenuse (i.e. adjacent / hypotenuse). + The angle in radian. + + + + Trigonometric principal Arc Cosine of this Complex number. + + The complex value. + The arc cosine of a complex number. + + + + Trigonometric principal Arc Tangent in radian + + The opposite for a unit adjacent (i.e. opposite / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Tangent of this Complex number. + + The complex value. + The arc tangent of a complex number. + + + + Trigonometric principal Arc Cotangent in radian + + The adjacent for a unit opposite (i.e. adjacent / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cotangent of this Complex number. + + The complex value. + The arc cotangent of a complex number. + + + + Trigonometric principal Arc Secant in radian + + The hypotenuse for a unit adjacent (i.e. hypotenuse / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Secant of this Complex number. + + The complex value. + The arc secant of a complex number. + + + + Trigonometric principal Arc Cosecant in radian + + The hypotenuse for a unit opposite (i.e. hypotenuse / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cosecant of this Complex number. + + The complex value. + The arc cosecant of a complex number. + + + + Hyperbolic Sine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic sine of the angle. + + + + Hyperbolic Sine of a Complex number. + + The complex value. + The hyperbolic sine of a complex number. + + + + Hyperbolic Cosine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. 
+ The hyperbolic Cosine of the angle. + + + + Hyperbolic Cosine of a Complex number. + + The complex value. + The hyperbolic cosine of a complex number. + + + + Hyperbolic Tangent in radian + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic tangent of the angle. + + + + Hyperbolic Tangent of a Complex number. + + The complex value. + The hyperbolic tangent of a complex number. + + + + Hyperbolic Cotangent + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cotangent of the angle. + + + + Hyperbolic Cotangent of a Complex number. + + The complex value. + The hyperbolic cotangent of a complex number. + + + + Hyperbolic Secant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic secant of the angle. + + + + Hyperbolic Secant of a Complex number. + + The complex value. + The hyperbolic secant of a complex number. + + + + Hyperbolic Cosecant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cosecant of the angle. + + + + Hyperbolic Cosecant of a Complex number. + + The complex value. + The hyperbolic cosecant of a complex number. + + + + Hyperbolic Area Sine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Sine of this Complex number. + + The complex value. + The hyperbolic arc sine of a complex number. + + + + Hyperbolic Area Cosine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosine of this Complex number. + + The complex value. + The hyperbolic arc cosine of a complex number. + + + + Hyperbolic Area Tangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Tangent of this Complex number. + + The complex value. + The hyperbolic arc tangent of a complex number. + + + + Hyperbolic Area Cotangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cotangent of this Complex number. + + The complex value. + The hyperbolic arc cotangent of a complex number. + + + + Hyperbolic Area Secant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Secant of this Complex number. + + The complex value. + The hyperbolic arc secant of a complex number. + + + + Hyperbolic Area Cosecant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosecant of this Complex number. + + The complex value. + The hyperbolic arc cosecant of a complex number. + + + + Hamming window. Named after Richard Hamming. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hamming window. Named after Richard Hamming. + Periodic version, useful e.g. for FFT purposes. + + + + + Hann window. Named after Julius von Hann. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hann window. Named after Julius von Hann. + Periodic version, useful e.g. for FFT purposes. + + + + + Cosine window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Cosine window. + Periodic version, useful e.g. for FFT purposes. + + + + + Lanczos window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Lanczos window. + Periodic version, useful e.g. for FFT purposes. + + + + + Gauss window. + + + + + Blackman window. + + + + + Blackman-Harris window. + + + + + Blackman-Nuttall window. + + + + + Bartlett window. 
+ + + + + Bartlett-Hann window. + + + + + Nuttall window. + + + + + Flat top window. + + + + + Uniform rectangular (Dirichlet) window. + + + + + Triangular window. + +
+
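For reference, the descriptive-statistics entries above (mean, variance, standard deviation, median, quantiles, percentiles) are exposed as extension methods on IEnumerable&lt;double&gt; in MathNet.Numerics.Statistics. A minimal usage sketch, assuming the MathNet.Numerics 3.16.0 package already referenced by the projects in this solution; the sample values and the StatisticsSketch class are illustrative only:

using System;
using MathNet.Numerics.Statistics;

static class StatisticsSketch
{
    static void Main()
    {
        double[] samples = { 2.0, 3.5, 3.5, 4.0, 5.5, 7.0, 9.5 };

        // Sample mean and unbiased (N-1, Bessel-corrected) standard deviation.
        double mean = samples.Mean();
        double stdDev = samples.StandardDeviation();

        // Median and tau-th quantile (approximately median-unbiased, R8 definition).
        double median = samples.Median();
        double q95 = samples.Quantile(0.95);

        // Integer percentile convenience wrapper around Quantile.
        double p90 = samples.Percentile(90);

        Console.WriteLine("{0} {1} {2} {3} {4}", mean, stdDev, median, q95, p90);
    }
}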
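The quadrature entries above describe 1-D definite integrals of smooth functions on a closed interval (plus Gauss-Legendre rules for 2-D rectangles). A minimal sketch of the 1-D case through the Integrate facade, under the same package assumption; the integrand and tolerance are arbitrary:

using System;
using MathNet.Numerics;

static class QuadratureSketch
{
    static void Main()
    {
        // Approximate the definite integral of exp(-x^2) over the closed interval [0, 1].
        double integral = Integrate.OnClosedInterval(x => Math.Exp(-x * x), 0.0, 1.0);

        // An overload also accepts a target accuracy for the approximation.
        double tighter = Integrate.OnClosedInterval(x => Math.Exp(-x * x), 0.0, 1.0, 1e-10);

        Console.WriteLine("{0} {1}", integral, tighter);
    }
}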
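The interpolation factory entries above all return an IInterpolation scheme that can then be evaluated (and extrapolated) at arbitrary points. A minimal sketch with made-up sample data; Common selects a scheme optimized for the given points, Linear builds a piecewise linear spline:

using System;
using MathNet.Numerics;
using MathNet.Numerics.Interpolation;

static class InterpolationSketch
{
    static void Main()
    {
        double[] t = { 0.0, 1.0, 2.0, 3.0, 4.0 };   // sample points t
        double[] x = { 1.0, 2.0, 0.5, 3.0, 2.5 };   // sample values x(t)

        // General-purpose interpolation based on arbitrary points.
        IInterpolation common = Interpolate.Common(t, x);

        // Piecewise linear interpolation based on the same points.
        IInterpolation linear = Interpolate.Linear(t, x);

        // Evaluate both schemes at an arbitrary point.
        Console.WriteLine("{0} {1}", common.Interpolate(2.4), linear.Interpolate(2.4));
    }
}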
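The angle conversions, trigonometric helpers and window functions above are plain static methods on Trig and Window. A small sketch with arbitrary inputs:

using System;
using MathNet.Numerics;

static class TrigWindowSketch
{
    static void Main()
    {
        // Convert a 360-periodic degree angle to radians, then take its sine.
        double radians = Trig.DegreeToRadian(30.0);
        Console.WriteLine(Trig.Sin(radians));        // approximately 0.5

        // Normalized sinc: sin(pi*x) / (pi*x).
        Console.WriteLine(Trig.Sinc(0.5));

        // Symmetric Hamming window of length 8, e.g. for filter design.
        double[] window = Window.Hamming(8);
        Console.WriteLine(string.Join(", ", window));
    }
}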
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML new file mode 100644 index 0000000..9c9b21f --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML @@ -0,0 +1,49706 @@ + + + + MathNet.Numerics + + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. + Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. 
+ + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. 
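The counting and random-sampling routines above sit on the Combinatorics static class. A minimal sketch, with arbitrary set sizes and the default random source:

using System;
using MathNet.Numerics;

static class CombinatoricsSketch
{
    static void Main()
    {
        // Counting: C(5,2) = 10 combinations, 5!/(5-2)! = 20 variations, 5! = 120 permutations.
        Console.WriteLine(Combinatorics.Combinations(5, 2));
        Console.WriteLine(Combinatorics.Variations(5, 2));
        Console.WriteLine(Combinatorics.Permutations(5));

        // Random permutation of the index numbers 0..N-1 (Fisher-Yates shuffling).
        int[] permutation = Combinatorics.GeneratePermutation(5);
        Console.WriteLine(string.Join(", ", permutation));
    }
}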
+ + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. 
+ + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. + + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. 
+ + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. + + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. 
+ + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. 
+ + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + 64-bit double precision complex numbers class. + + + + The class Complex provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex structures + has two special constant values and + . + + + + Complex x = new Complex(1d, 2d); + Complex y = Complex.FromPolarCoordinates(1d, Math.Pi); + Complex z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. 
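The static helpers documented just above (Sqrt, Exp, Pow, Log, the trigonometric and hyperbolic functions) mirror the instance members and the System.Numerics naming. A minimal sketch, assuming those statics exist on Complex32 with the obvious signatures:

    using System;
    using MathNet.Numerics;

    class Complex32StaticFunctionsSketch
    {
        static void Main()
        {
            var c = new Complex32(0.5f, -1.5f);

            // Static counterparts of the members documented above.
            Complex32 root = Complex32.Sqrt(c);   // principal square root
            Complex32 exp  = Complex32.Exp(c);    // e raised to c
            Complex32 sine = Complex32.Sin(c);    // complex sine

            // Asin returns the principal inverse value.
            Complex32 back = Complex32.Asin(sine);

            Console.WriteLine($"sqrt = {root}, exp = {exp}, asin(sin(c)) = {back}");
        }
    }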
+ + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new Complex instance + with real and imaginary numbers positive infinite. + + + + + Returns a new Complex instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex is zero, the Complex + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex + + + + Gets the magnitude (or absolute value) of a complex number. + + The magnitude of the current instance. + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The complex numbers to add. + The double value to add. + + + Subtraction operator. Subtracts double value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The double value to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The double value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a double value. + The result of the subtraction. + The double vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. + The result of the multiplication. + The double value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. + The result of the multiplication. + The complex number to multiply. + The double value to multiply. + + + Division operator. Divides a complex number by another. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a double value by a complex number. + The result of the division. + The dividend. + The divisor. + + + Division operator. 
Divides a complex number by a double value. + The result of the division. + The dividend. + The divisor. + + + + A string representation of this complex number. + + + The string representation of this complex number. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string. + + + A format specification. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format provider. + + + An that supplies culture-specific formatting information. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string and format provider. + + + if the n, is not a number. + + + if s, is . + + + A format specification. + + + An that supplies culture-specific formatting information. + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + A norm of this value. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + The value to compare with. + + + A norm of the difference between this and the other value. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. 
+ Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex. + + The double value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex to a . + + A with the same values as this Complex. + + + + Returns the additive inverse of a specified complex number. + + The result of the and components of the parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + A complex number. + The absolute value (or magnitude) of a complex number. 
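The double-precision Complex described here matches the shape of System.Numerics.Complex, which MathNet.Numerics 3.x builds on for the full .NET framework; the sketch below therefore calls the System.Numerics type, and that mapping should be read as an assumption about how this section resolves at compile time.

    using System;
    using System.Numerics;

    class ComplexStaticHelpersSketch
    {
        static void Main()
        {
            var a = new Complex(3.0, 4.0);
            var b = Complex.FromPolarCoordinates(2.0, Math.PI / 4);

            // The static helpers documented above mirror the operators.
            Complex sum      = Complex.Add(a, b);
            Complex quotient = Complex.Divide(a, b);
            Complex conj     = Complex.Conjugate(a);    // 3 - 4i
            Complex recip    = Complex.Reciprocal(a);   // 1 / (3 + 4i)

            Console.WriteLine($"|a| = {Complex.Abs(a)}");       // 5
            Console.WriteLine($"sqrt(a) = {Complex.Sqrt(a)}");  // principal square root
            Console.WriteLine($"{sum} {quotient} {conj} {recip}");
        }
    }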
+ + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a double-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A double-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. + + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). + + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. 
+ + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + The number to perfom this operation on. + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + The number to perfom this operation on. + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + The number to perfom this operation on. + + true if this instance is real nonnegative number; otherwise, false. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. 
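The extension methods above decorate the double-precision Complex type; bringing the MathNet.Numerics namespace into scope is what makes them visible. The member names in this sketch (MagnitudeSquared, Conjugate, SquareRoot, IsReal, IsNaN) are taken from the summaries and should be read as assumptions rather than a verified API listing.

    using System;
    using System.Numerics;
    using MathNet.Numerics;   // brings the extension methods into scope

    class ComplexExtensionsSketch
    {
        static void Main()
        {
            var c = new Complex(1.0, -2.0);

            // Extension methods documented above (names assumed from the summaries).
            double m2   = c.MagnitudeSquared();   // 1^2 + (-2)^2 = 5
            Complex con = c.Conjugate();          // 1 + 2i
            Complex sq  = c.SquareRoot();         // principal square root

            Console.WriteLine($"m2 = {m2}, conjugate = {con}, sqrt = {sq}");
            Console.WriteLine($"real? {c.IsReal()}, NaN? {c.IsNaN()}");
        }
    }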
+ + + + + Converts the string representation of a complex number to a double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + A collection of frequently used mathematical constants. 
+ + + + The number e + + + The number log[2](e) + + + The number log[10](e) + + + The number log[e](2) + + + The number log[e](10) + + + The number log[e](pi) + + + The number log[e](2*pi)/2 + + + The number 1/e + + + The number sqrt(e) + + + The number sqrt(2) + + + The number sqrt(3) + + + The number sqrt(1/2) = 1/sqrt(2) = sqrt(2)/2 + + + The number sqrt(3)/2 + + + The number pi + + + The number pi*2 + + + The number pi/2 + + + The number pi*3/2 + + + The number pi/4 + + + The number sqrt(pi) + + + The number sqrt(2pi) + + + The number sqrt(2*pi*e) + + + The number log(sqrt(2*pi)) + + + The number log(sqrt(2*pi*e)) + + + The number log(2 * sqrt(e / pi)) + + + The number 1/pi + + + The number 2/pi + + + The number 1/sqrt(pi) + + + The number 1/sqrt(2pi) + + + The number 2/sqrt(pi) + + + The number 2 * sqrt(e / pi) + + + The number (pi)/180 - factor to convert from Degree (deg) to Radians (rad). + + + + + The number (pi)/200 - factor to convert from NewGrad (grad) to Radians (rad). + + + + + The number ln(10)/20 - factor to convert from Power Decibel (dB) to Neper (Np). Use this version when the Decibel represent a power gain but the compared values are not powers (e.g. amplitude, current, voltage). + + + The number ln(10)/10 - factor to convert from Neutral Decibel (dB) to Neper (Np). Use this version when either both or neither of the Decibel and the compared values represent powers. + + + The Catalan constant + Sum(k=0 -> inf){ (-1)^k/(2*k + 1)2 } + + + The Euler-Mascheroni constant + lim(n -> inf){ Sum(k=1 -> n) { 1/k - log(n) } } + + + The number (1+sqrt(5))/2, also known as the golden ratio + + + The Glaisher constant + e^(1/12 - Zeta(-1)) + + + The Khinchin constant + prod(k=1 -> inf){1+1/(k*(k+2))^log(k,2)} + + + + The size of a double in bytes. + + + + + The size of an int in bytes. + + + + + The size of a float in bytes. + + + + + The size of a Complex in bytes. + + + + + The size of a Complex in bytes. 
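The mathematical constants listed above are exposed as static fields of a Constants class. The field names used in this sketch (Constants.E, Constants.Sqrt2, Constants.Pi2, Constants.InvPi, Constants.Degree) are inferred from the descriptions and may not match the library exactly.

    using System;
    using MathNet.Numerics;

    class ConstantsSketch
    {
        static void Main()
        {
            // Field names below are inferred from the descriptions above.
            Console.WriteLine(Constants.E);       // the number e
            Console.WriteLine(Constants.Sqrt2);   // sqrt(2)
            Console.WriteLine(Constants.Pi2);     // 2*pi
            Console.WriteLine(Constants.InvPi);   // 1/pi

            // Degree is the pi/180 factor for converting degrees to radians.
            double radians = 45.0 * Constants.Degree;
            Console.WriteLine(radians);           // approximately 0.7853981634
        }
    }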
+ + + + Speed of Light in Vacuum: c_0 = 2.99792458e8 [m s^-1] (defined, exact; 2007 CODATA) + + + Magnetic Permeability in Vacuum: mu_0 = 4*Pi * 10^-7 [N A^-2 = kg m A^-2 s^-2] (defined, exact; 2007 CODATA) + + + Electric Permittivity in Vacuum: epsilon_0 = 1/(mu_0*c_0^2) [F m^-1 = A^2 s^4 kg^-1 m^-3] (defined, exact; 2007 CODATA) + + + Characteristic Impedance of Vacuum: Z_0 = mu_0*c_0 [Ohm = m^2 kg s^-3 A^-2] (defined, exact; 2007 CODATA) + + + Newtonian Constant of Gravitation: G = 6.67429e-11 [m^3 kg^-1 s^-2] (2007 CODATA) + + + Planck's constant: h = 6.62606896e-34 [J s = m^2 kg s^-1] (2007 CODATA) + + + Reduced Planck's constant: h_bar = h / (2*Pi) [J s = m^2 kg s^-1] (2007 CODATA) + + + Planck mass: m_p = (h_bar*c_0/G)^(1/2) [kg] (2007 CODATA) + + + Planck temperature: T_p = (h_bar*c_0^5/G)^(1/2)/k [K] (2007 CODATA) + + + Planck length: l_p = h_bar/(m_p*c_0) [m] (2007 CODATA) + + + Planck time: t_p = l_p/c_0 [s] (2007 CODATA) + + + Elementary Electron Charge: e = 1.602176487e-19 [C = A s] (2007 CODATA) + + + Magnetic Flux Quantum: theta_0 = h/(2*e) [Wb = m^2 kg s^-2 A^-1] (2007 CODATA) + + + Conductance Quantum: G_0 = 2*e^2/h [S = m^-2 kg^-1 s^3 A^2] (2007 CODATA) + + + Josephson Constant: K_J = 2*e/h [Hz V^-1] (2007 CODATA) + + + Von Klitzing Constant: R_K = h/e^2 [Ohm = m^2 kg s^-3 A^-2] (2007 CODATA) + + + Bohr Magneton: mu_B = e*h_bar/2*m_e [J T^-1] (2007 CODATA) + + + Nuclear Magneton: mu_N = e*h_bar/2*m_p [J T^-1] (2007 CODATA) + + + Fine Structure Constant: alpha = e^2/4*Pi*e_0*h_bar*c_0 [1] (2007 CODATA) + + + Rydberg Constant: R_infty = alpha^2*m_e*c_0/2*h [m^-1] (2007 CODATA) + + + Bor Radius: a_0 = alpha/4*Pi*R_infty [m] (2007 CODATA) + + + Hartree Energy: E_h = 2*R_infty*h*c_0 [J] (2007 CODATA) + + + Quantum of Circulation: h/2*m_e [m^2 s^-1] (2007 CODATA) + + + Fermi Coupling Constant: G_F/(h_bar*c_0)^3 [GeV^-2] (2007 CODATA) + + + Weak Mixin Angle: sin^2(theta_W) [1] (2007 CODATA) + + + Electron Mass: [kg] (2007 CODATA) + + + Electron Mass Energy Equivalent: [J] (2007 CODATA) + + + Electron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Electron Compton Wavelength: [m] (2007 CODATA) + + + Classical Electron Radius: [m] (2007 CODATA) + + + Tomson Cross Section: [m^2] (2002 CODATA) + + + Electron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Electon G-Factor: [1] (2007 CODATA) + + + Muon Mass: [kg] (2007 CODATA) + + + Muon Mass Energy Equivalent: [J] (2007 CODATA) + + + Muon Molar Mass: [kg mol^-1] (2007 CODATA) + + + Muon Compton Wavelength: [m] (2007 CODATA) + + + Muon Magnetic Moment: [J T^-1] (2007 CODATA) + + + Muon G-Factor: [1] (2007 CODATA) + + + Tau Mass: [kg] (2007 CODATA) + + + Tau Mass Energy Equivalent: [J] (2007 CODATA) + + + Tau Molar Mass: [kg mol^-1] (2007 CODATA) + + + Tau Compton Wavelength: [m] (2007 CODATA) + + + Proton Mass: [kg] (2007 CODATA) + + + Proton Mass Energy Equivalent: [J] (2007 CODATA) + + + Proton Molar Mass: [kg mol^-1] (2007 CODATA) + + + Proton Compton Wavelength: [m] (2007 CODATA) + + + Proton Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton G-Factor: [1] (2007 CODATA) + + + Proton Shielded Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Proton Shielded Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Neutron Mass: [kg] (2007 CODATA) + + + Neutron Mass Energy Equivalent: [J] (2007 CODATA) + + + Neutron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Neuron Compton Wavelength: [m] (2007 CODATA) + + + Neutron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Neutron G-Factor: [1] 
(2007 CODATA) + + + Neutron Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Deuteron Mass: [kg] (2007 CODATA) + + + Deuteron Mass Energy Equivalent: [J] (2007 CODATA) + + + Deuteron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Deuteron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Helion Mass: [kg] (2007 CODATA) + + + Helion Mass Energy Equivalent: [J] (2007 CODATA) + + + Helion Molar Mass: [kg mol^-1] (2007 CODATA) + + + Avogadro constant: [mol^-1] (2010 CODATA) + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 + + + The SI prefix factor corresponding to 1 000 + + + The SI prefix factor corresponding to 100 + + + The SI prefix factor corresponding to 10 + + + The SI prefix factor corresponding to 0.1 + + + The SI prefix factor corresponding to 0.01 + + + The SI prefix factor corresponding to 0.001 + + + The SI prefix factor corresponding to 0.000 001 + + + The SI prefix factor corresponding to 0.000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 000 001 + + + + Sets parameters for the library. + + + + + Use a specific provider if configured, e.g. using + environment variables, or fall back to the best providers. + + + + + Use the best provider available. + + + + + Gets or sets a value indicating whether the distribution classes check validate each parameter. + For the multivariate distributions this could involve an expensive matrix factorization. + The default setting of this property is true. + + + + + Gets or sets a value indicating whether to use thread safe random number generators (RNG). + Thread safe RNG about two and half time slower than non-thread safe RNG. + + + true to use thread safe random number generators ; otherwise, false. + + + + + Optional path to try to load native provider binaries from. + + + + + Gets or sets the linear algebra provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets the fourier transform provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets a value indicating how many parallel worker threads shall be used + when parallelization is applicable. + + Default to the number of processor cores, must be between 1 and 1024 (inclusive). + + + + Gets or sets the TaskScheduler used to schedule the worker tasks. + + + + + Gets or sets the the block size to use for + the native linear algebra provider. + + The block size. Default 512, must be at least 32. + + + + Gets or sets the order of the matrix when linear algebra provider + must calculate multiply in parallel threads. + + The order. Default 64, must be at least 3. + + + + Gets or sets the number of elements a vector or matrix + must contain before we multiply threads. + + Number of elements. Default 300, must be at least 3. + + + + Numerical Derivative. 
+ + + + + Initialized a NumericalDerivative with the given points and center. + + + + + Initialized a NumericalDerivative with the default points and center for the given order. + + + + + Evaluates the derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + Derivative order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Univariate function handle. + Derivative order. + + + + Evaluates the first derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the first derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the second derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the second derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + + + + Evaluates the partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + + + + Class to calculate finite difference coefficients using Taylor series expansion method. + + + For n points, coefficients are calculated up to the maximum derivative order possible (n-1). + The current function value position specifies the "center" for surrounding coefficients. + Selecting the first, middle or last positions represent forward, backwards and central difference methods. + + + + + + + Number of points for finite difference coefficients. Changing this value recalculates the coefficients table. + + + + + Initializes a new instance of the class. + + Number of finite difference coefficients. + + + + Gets the finite difference coefficients for a specified center and order. + + Current function position with respect to coefficients. 
Must be within point range. + Order of finite difference coefficients. + Vector of finite difference coefficients. + + + + Gets the finite difference coefficients for all orders at a specified center. + + Current function position with respect to coefficients. Must be within point range. + Rectangular array of coefficients, with columns specifing order. + + + + Type of finite different step size. + + + + + The absolute step size value will be used in numerical derivatives, regardless of order or function parameters. + + + + + A base step size value, h, will be scaled according to the function input parameter. A common example is hx = h*(1+abs(x)), however + this may vary depending on implementation. This definition only guarantees that the only scaling will be relative to the + function input parameter and not the order of the finite difference derivative. + + + + + A base step size value, eps (typically machine precision), is scaled according to the finite difference coefficient order + and function input parameter. The initial scaling according to finite different coefficient order can be thought of as producing a + base step size, h, that is equivalent to scaling. This stepsize is then scaled according to the function + input parameter. Although implementation may vary, an example of second order accurate scaling may be (eps)^(1/3)*(1+abs(x)). + + + + + Class to evaluate the numerical derivative of a function using finite difference approximations. + Variable point and center methods can be initialized . + This class can also be used to return function handles (delegates) for a fixed derivative order and variable. + It is possible to evaluate the derivative and partial derivative of univariate and multivariate functions respectively. + + + + + Initializes a NumericalDerivative class with the default 3 point center difference method. + + + + + Initialized a NumericalDerivative class. + + Number of points for finite difference derivatives. + Location of the center with respect to other points. Value ranges from zero to points-1. + + + + Sets and gets the finite difference step size. This value is for each function evaluation if relative stepsize types are used. + If the base step size used in scaling is desired, see . + + + Setting then getting the StepSize may return a different value. This is not unusual since a user-defined step size is converted to a + base-2 representable number to improve finite difference accuracy. + + + + + Sets and gets the base fininte difference step size. This assigned value to this parameter is only used if is set to RelativeX. + However, if the StepType is Relative, it will contain the base step size computed from based on the finite difference order. + + + + + Sets and gets the base finite difference step size. This parameter is only used if is set to Relative. + By default this is set to machine epsilon, from which is computed. + + + + + Sets and gets the location of the center point for the finite difference derivative. + + + + + Number of times a function is evaluated for numerical derivatives. + + + + + Type of step size for computing finite differences. If set to absolute, dx = h. + If set to relative, dx = (1+abs(x))*h^(2/(order+1)). This provides accurate results when + h is approximately equal to the square-root of machine accuracy, epsilon. + + + + + Evaluates the derivative of equidistant points using the finite difference method. + + Vector of points StepSize apart. + Derivative order. + Finite difference step size. 
+ Derivative of points of the specified order. + + + + Evaluates the derivative of a scalar univariate function. + + + Supplying the optional argument currentValue will reduce the number of function evaluations + required to calculate the finite difference derivative. + + Function handle. + Point at which to compute the derivative. + Derivative order. + Current function value at center. + Function derivative at x of the specified order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Input function handle. + Derivative order. + Function handle that evaluates the derivative of input function at a fixed order. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Function partial derivative at x of the specified order. + + + + Evaluates the partial derivatives of a multivariate function array. + + + This function assumes the input vector x is of the correct length for f. + + Multivariate vector function array handle. + Vector at which to evaluate the derivatives. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Vector of functions partial derivatives at x of the specified order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at a fixed order. + + + + Creates a function handle for the partial derivative of a vector multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at fixed order. + + + + Evaluates the mixed partial derivative of variable order for multivariate functions. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function handle. + Points at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivative at x of the specified order. + + + + Evaluates the mixed partial derivative of variable order for multivariate function arrays. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function array handle. + Vector at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivatives at x of the specified order. + + + + Creates a function handle for the mixed partial derivative of a multivariate function. + + Input function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. 
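A short sketch of the differentiation workflow described above, assuming the NumericalDerivative class lives in MathNet.Numerics.Differentiation and exposes EvaluateDerivative and CreateDerivativeFunctionHandle as the summaries suggest; the parameterless constructor is taken to give the default 3-point central difference scheme.

    using System;
    using MathNet.Numerics.Differentiation;

    class NumericalDerivativeSketch
    {
        static void Main()
        {
            Func<double, double> f = x => Math.Sin(x);

            // Default 3-point central difference method, as described above.
            var nd = new NumericalDerivative();

            // First and second derivative of sin at x = 1 (cos(1) and -sin(1)).
            double d1 = nd.EvaluateDerivative(f, 1.0, 1);
            double d2 = nd.EvaluateDerivative(f, 1.0, 2);
            Console.WriteLine($"f'(1)  ~ {d1}");
            Console.WriteLine($"f''(1) ~ {d2}");

            // A reusable handle for the first derivative.
            Func<double, double> fPrime = nd.CreateDerivativeFunctionHandle(f, 1);
            Console.WriteLine($"f'(0)  ~ {fPrime(0.0)}");   // approximately 1
        }
    }

The handle form is convenient when the same derivative order is evaluated at many points, since the finite difference scheme is fixed once.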
+ + + + Creates a function handle for the mixed partial derivative of a multivariate vector function. + + Input vector function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Resets the evaluation counter. + + + + + Class for evaluating the Hessian of a smooth continuously differentiable function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Hessian object with a three point central difference method. + + + + + Creates a numerical Hessian with a specified differentiation scheme. + + Number of points for Hessian evaluation. + Center point for differentiation. + + + + Evaluates the Hessian of the scalar univariate function f at points x. + + Scalar univariate function handle. + Point at which to evaluate Hessian. + Hessian tensor. + + + + Evaluates the Hessian of a multivariate function f at points x. + + + This method of computing the Hessian is only vaid for Lipschitz continuous functions. + The function mirrors the Hessian along the diagonal since d2f/dxdy = d2f/dydx for continuously differentiable functions. + + Multivariate function handle.> + Points at which to evaluate Hessian.> + Hessian tensor. + + + + Resets the function evaluation counter for the Hessian. + + + + + Class for evaluating the Jacobian of a function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Jacobian object with a three point central difference method. + + + + + Creates a numerical Jacobian with a specified differentiation scheme. + + Number of points for Jacobian evaluation. + Center point for differentiation. + + + + Evaluates the Jacobian of scalar univariate function f at point x. + + Scalar univariate function handle. + Point at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x. + + + This function assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x given a current function value. + + + To minimize the number of function evaluations, a user can supply the current value of the function + to be used in computing the Jacobian. This value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Current function value at finite difference center. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function array f at vector x. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Jacobian matrix. + + + + Evaluates the Jacobian of a multivariate function array f at vector x given a vector of current function values. + + + To minimize the number of function evaluations, a user can supply a vector of current values of the functions + to be used in computing the Jacobian. These value must correspond to the "center" location for the + finite differencing. 
If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Vector of current function values. + Jacobian matrix. + + + + Resets the function evaluation counter for the Jacobian. + + + + + Metrics to measure the distance between two structures. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Pearson's distance, i.e. 1 - the person correlation coefficient. + + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Discrete Univariate Bernoulli distribution. + The Bernoulli distribution is a distribution over bits. 
The parameter + p specifies the probability that a 1 is generated. + Wikipedia - Bernoulli distribution. + + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + If the Bernoulli parameter is not in the range [0,1]. + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + If the Bernoulli parameter is not in the range [0,1]. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Generates one sample from the Bernoulli distribution. + + The random source to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A random sample from the Bernoulli distribution. + + + + Samples a Bernoulli distributed random variable. + + A sample from the Bernoulli distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The random number generator to use. 
+ The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Continuous Univariate Beta distribution. + For details about this distribution, see + Wikipedia - Beta distribution. + + + There are a few special cases for the parameterization of the Beta distribution. When both + shape parameters are positive infinity, the Beta distribution degenerates to a point distribution + at 0.5. When one of the shape parameters is positive infinity, the distribution degenerates to a point + distribution at the positive infinity. When both shape parameters are 0.0, the Beta distribution + degenerates to a Bernoulli distribution with parameter 0.5. When one shape parameter is 0.0, the + distribution degenerates to a point distribution at the non-zero shape parameter. + + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + A string representation of the Beta distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Gets the α shape parameter of the Beta distribution. Range: α ≥ 0. + + + + + Gets the β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Beta distribution. + + + + + Gets the variance of the Beta distribution. + + + + + Gets the standard deviation of the Beta distribution. + + + + + Gets the entropy of the Beta distribution. + + + + + Gets the skewness of the Beta distribution. + + + + + Gets the mode of the Beta distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the Beta distribution. + + + + + Gets the minimum of the Beta distribution. + + + + + Gets the maximum of the Beta distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . 
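The Bernoulli members documented above (PMF, CDF, sampling, plus static equivalents) can be exercised as follows, assuming the MathNet.Numerics.Distributions.Bernoulli class; parameter values and the demo class name are illustrative:

    using System;
    using MathNet.Numerics.Distributions;

    static class BernoulliDemo
    {
        static void Main()
        {
            var bernoulli = new Bernoulli(0.3);                        // P(X = 1) = 0.3
            Console.WriteLine(bernoulli.Probability(1));               // PMF at k = 1 -> 0.3
            Console.WriteLine(bernoulli.CumulativeDistribution(0.0));  // P(X <= 0) -> 0.7
            Console.WriteLine(bernoulli.Mean);                         // 0.3

            int draw = bernoulli.Sample();                             // 0 or 1
            Console.WriteLine(draw);

            // Static forms take the distribution parameters explicitly.
            Console.WriteLine(Bernoulli.PMF(0.3, 1));
            Console.WriteLine(Bernoulli.Sample(new Random(42), 0.3));
        }
    }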
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Beta distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Beta distribution. + + a sequence of samples from the distribution. + + + + Samples Beta distributed random variables by sampling two Gamma variables and normalizing. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a random number from the Beta distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. 
+ The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + + + + Create a Beta PERT distribution, used in risk analysis and other domains where an expert forecast + is used to construct an underlying beta distribution. + + The minimum value. + The maximum value. + The most likely value (mode). + The random number generator which is used to draw random samples. + The Beta distribution derived from the PERT parameters. + + + + A string representation of the distribution. + + A string representation of the BetaScaled distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the α shape parameter of the BetaScaled distribution. Range: α > 0. + + + + + Gets the β shape parameter of the BetaScaled distribution. Range: β > 0. + + + + + Gets the location (μ) of the BetaScaled distribution. + + + + + Gets the scale (σ) of the BetaScaled distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the BetaScaled distribution. + + + + + Gets the variance of the BetaScaled distribution. + + + + + Gets the standard deviation of the BetaScaled distribution. + + + + + Gets the entropy of the BetaScaled distribution. + + + + + Gets the skewness of the BetaScaled distribution. + + + + + Gets the mode of the BetaScaled distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the BetaScaled distribution. + + + + + Gets the minimum of the BetaScaled distribution. + + + + + Gets the maximum of the BetaScaled distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. 
ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. 
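For the Beta and BetaScaled entries above, a usage sketch assuming the MathNet.Numerics.Distributions types of those names; the PERT factory call shown is an assumption based on the parameter list documented above (minimum, maximum, most likely), and all values are illustrative:

    using System;
    using MathNet.Numerics.Distributions;

    static class BetaDemo
    {
        static void Main()
        {
            var beta = new Beta(2.0, 5.0);                                // α = 2, β = 5
            Console.WriteLine(beta.Density(0.3));                         // PDF at x = 0.3
            Console.WriteLine(beta.CumulativeDistribution(0.3));          // CDF at x = 0.3
            Console.WriteLine(beta.InverseCumulativeDistribution(0.5));   // median via the (approximate) InvCDF

            // Static equivalents parameterized explicitly.
            Console.WriteLine(Beta.PDF(2.0, 5.0, 0.3));
            Console.WriteLine(Beta.InvCDF(2.0, 5.0, 0.5));

            // BetaScaled adds location and scale; PERT builds one from min / max / most-likely
            // (factory name and argument order assumed from the documentation above).
            var scaled = new BetaScaled(2.0, 5.0, 10.0, 4.0);
            Console.WriteLine(scaled.Mean);
            var pert = BetaScaled.PERT(10.0, 20.0, 12.0);
            Console.WriteLine(pert.Sample());
        }
    }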
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Binomial distribution. + For details about this distribution, see + Wikipedia - Binomial distribution. + + + The distribution is parameterized by a probability (between 0.0 and 1.0). + + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + If is not in the interval [0.0,1.0]. + If is negative. + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The random number generator which is used to draw random samples. + If is not in the interval [0.0,1.0]. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + + + + Gets the success probability in each trial. Range: 0 ≤ p ≤ 1. + + + + + Gets the number of trials. Range: n ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . 
+ + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the Binomial distribution without doing parameter checking. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successful trials. + + + + Samples a Binomially distributed random variable. + + The number of successes in N trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Binomially distributed random variables. + + a sequence of successes in N trials. + + + + Samples a binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Samples a binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Discrete Univariate Categorical distribution. + For details about this distribution, see + Wikipedia - Categorical distribution. This + distribution is sometimes called the Discrete distribution. 
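A corresponding sketch for the Binomial distribution documented above (again assuming MathNet.Numerics.Distributions.Binomial; parameter values are illustrative):

    using System;
    using MathNet.Numerics.Distributions;

    static class BinomialDemo
    {
        static void Main()
        {
            var binomial = new Binomial(0.25, 20);                      // p = 0.25, n = 20 trials
            Console.WriteLine(binomial.Probability(5));                 // P(X = 5)
            Console.WriteLine(binomial.CumulativeDistribution(5.0));    // P(X <= 5)
            Console.WriteLine(binomial.Mean);                           // n * p = 5

            int successes = binomial.Sample();                          // number of successes in 20 trials
            Console.WriteLine(successes);

            // Static equivalents.
            Console.WriteLine(Binomial.PMF(0.25, 20, 5));
            Console.WriteLine(Binomial.CDF(0.25, 20, 5.0));
        }
    }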
+ + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + Support: 0..k where k = length(probability mass array)-1 + + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class from a . The distribution + will not be automatically updated when the histogram changes. The categorical distribution will have + one value for each bucket and a probability for that value proportional to the bucket count. + + The histogram from which to create the categorical variable. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Gets the probability mass vector (non-negative ratios) of the multinomial. + + Sometimes the normalized probability vector cannot be represented exactly in a floating point representation. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a . + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets he mode of the distribution. + + Throws a . + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
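The Categorical entries above stress that the probability-mass vector need not be normalized. A sketch under the same MathNet.Numerics assumption, using an illustrative unnormalized ratio vector:

    using System;
    using MathNet.Numerics.Distributions;

    static class CategoricalDemo
    {
        static void Main()
        {
            // Unnormalized ratios: effective probabilities are 1/8, 2/8, 5/8.
            double[] ratios = { 1.0, 2.0, 5.0 };
            var categorical = new Categorical(ratios);

            Console.WriteLine(categorical.Probability(2));              // P(X = 2) = 0.625
            Console.WriteLine(categorical.CumulativeDistribution(1.0)); // P(X <= 1) = 0.375

            int index = categorical.Sample();                           // 0, 1 or 2
            Console.WriteLine(index);

            // Static sampling with an explicit random source.
            Console.WriteLine(Categorical.Sample(new Random(7), ratios));
        }
    }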
+ + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. + + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. 
+ random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. + + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. 
Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
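A sketch for the Cauchy distribution documented above, assuming MathNet.Numerics.Distributions.Cauchy. Since the Cauchy distribution has no finite mean or variance, the example sticks to density, CDF, quantiles and sampling (values illustrative):

    using System;
    using MathNet.Numerics.Distributions;

    static class CauchyDemo
    {
        static void Main()
        {
            var cauchy = new Cauchy(0.0, 1.0);                               // location x0 = 0, scale γ = 1
            Console.WriteLine(cauchy.Density(0.0));                          // peak density 1/π ≈ 0.3183
            Console.WriteLine(cauchy.CumulativeDistribution(1.0));           // 0.75 for the standard Cauchy
            Console.WriteLine(cauchy.InverseCumulativeDistribution(0.975));  // upper 2.5% quantile

            Console.WriteLine(cauchy.Sample());                              // a single draw

            // Static equivalents.
            Console.WriteLine(Cauchy.PDF(0.0, 1.0, 0.0));
            Console.WriteLine(Cauchy.CDF(0.0, 1.0, 1.0));
        }
    }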
+ + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. 
+ The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. 
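The Chi and Chi-Squared entries above share a single degrees-of-freedom parameter k. A combined sketch under the MathNet.Numerics assumption (k and the probe points are illustrative; the last line is the usual critical-value lookup):

    using System;
    using MathNet.Numerics.Distributions;

    static class ChiDemo
    {
        static void Main()
        {
            var chi = new Chi(3.0);                                     // k = 3 degrees of freedom
            Console.WriteLine(chi.Density(1.0));
            Console.WriteLine(chi.CumulativeDistribution(1.0));

            var chiSquared = new ChiSquared(3.0);
            Console.WriteLine(chiSquared.Mean);                         // k = 3
            Console.WriteLine(chiSquared.CumulativeDistribution(2.0));

            // 95th percentile of χ²(3), e.g. as a goodness-of-fit critical value (~7.81).
            Console.WriteLine(ChiSquared.InvCDF(3.0, 0.95));
        }
    }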
+ + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . 
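A sketch for the continuous uniform distribution documented above (MathNet.Numerics assumed; the bounds are illustrative):

    using System;
    using MathNet.Numerics.Distributions;

    static class UniformDemo
    {
        static void Main()
        {
            var uniform = new ContinuousUniform(-1.0, 1.0);                 // lower = -1, upper = 1
            Console.WriteLine(uniform.Density(0.25));                       // 1 / (upper - lower) = 0.5
            Console.WriteLine(uniform.CumulativeDistribution(0.0));         // 0.5
            Console.WriteLine(uniform.InverseCumulativeDistribution(0.9));  // 0.8

            Console.WriteLine(uniform.Sample());                            // a draw in [-1, 1]

            // Static form with explicit bounds and the default random source.
            Console.WriteLine(ContinuousUniform.Sample(-1.0, 1.0));
        }
    }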
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. 
+ + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. + + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. 
Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. + The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. 
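[Editor's sketch] The Dirichlet doc comments above note that the parameters are unnormalized ratios while the density must be evaluated on the probability simplex. A short sketch under the assumption that the constructor and member signatures (Dirichlet(double[]), Density(double[]), Sample()) match those comments:

using System;
using MathNet.Numerics.Distributions;

class DirichletSketch
{
    static void Main()
    {
        // The concentration parameters are ratios; they are not required to sum to 1.
        var dirichlet = new Dirichlet(new[] { 2.0, 3.0, 5.0 });

        // Density is evaluated at a point whose components sum to 1 (or with the last one omitted).
        double d = dirichlet.Density(new[] { 0.2, 0.3, 0.5 });

        // Each sample is itself a probability vector on the simplex.
        double[] p = dirichlet.Sample();
        Console.WriteLine($"density={d}, sample sums to {p[0] + p[1] + p[2]}");
    }
}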
+ + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. 
Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
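[Editor's sketch] To illustrate the inclusive bounds the DiscreteUniform comments above emphasize, a minimal sketch (member names assumed from these doc comments; (1, 6) models a fair six-sided die because both endpoints are included):

using System;
using MathNet.Numerics.Distributions;

class DieSketch
{
    static void Main()
    {
        var die = new DiscreteUniform(1, 6);          // lower and upper are both inclusive

        double pmf = die.Probability(3);              // 1/6
        double cdf = die.CumulativeDistribution(3.0); // 3/6
        int roll = die.Sample();

        Console.WriteLine($"P(X=3)={pmf}, P(X<=3)={cdf}, roll={roll}");
    }
}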
+ + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. 
+ The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . 
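[Editor's sketch] A small sketch of the Exponential members documented above (Mean, CDF, and the inverse CDF / quantile function); the method names are taken from these doc comments, and the closed forms in the comments below are standard properties of the exponential distribution:

using System;
using MathNet.Numerics.Distributions;

class ExponentialSketch
{
    static void Main()
    {
        const double rate = 0.5;                                 // lambda, so the mean is 1/lambda = 2
        var exp = new Exponential(rate);

        double median = exp.InverseCumulativeDistribution(0.5);  // ln(2)/lambda, about 1.386
        double tail = 1.0 - Exponential.CDF(rate, 3.0);          // P(X > 3) = e^(-lambda*3)

        Console.WriteLine($"mean={exp.Mean}, median={median}, P(X>3)={tail}");
    }
}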
+ + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
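[Editor's sketch] The FisherSnedecor CDF documented above is what an F-test p-value is built from. A minimal sketch assuming the constructor takes the two degrees of freedom as described (the example F statistic and degrees of freedom are illustrative only):

using System;
using MathNet.Numerics.Distributions;

class FTestSketch
{
    static void Main()
    {
        // F distribution with d1 = 3 and d2 = 20 degrees of freedom, e.g. from a one-way ANOVA.
        var f = new FisherSnedecor(3.0, 20.0);

        double observed = 4.2;
        double pValue = 1.0 - f.CumulativeDistribution(observed); // upper-tail probability P(F >= observed)

        Console.WriteLine($"P(F >= {observed}) = {pValue}");
    }
}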
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. 
+ The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. 
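[Editor's sketch] The Gamma remarks above distinguish the shape/rate (inverse scale) constructor from the shape/scale factory and cite Marsaglia & Tsang for sampling. A sketch of both parameterizations; the WithShapeScale factory name is assumed to match the "shape and scale parameter" constructor described in these comments:

using System;
using MathNet.Numerics.Distributions;

class GammaSketch
{
    static void Main()
    {
        // Constructor: shape (alpha) and rate (beta = 1/theta)...
        var byRate = new Gamma(2.0, 0.5);

        // ...factory: shape and scale (theta). Both describe the same distribution here.
        var byScale = Gamma.WithShapeScale(2.0, 2.0);
        Console.WriteLine($"mean(rate form)={byRate.Mean}, mean(scale form)={byScale.Mean}"); // both 4

        // Sampling uses the Marsaglia & Tsang method cited in the remarks above.
        var rng = new Random(1);
        Console.WriteLine($"sample={Gamma.Sample(rng, 2.0, 0.5)}");
    }
}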
+ + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. 
+ + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
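[Editor's sketch] The Geometric comments above stress that this implementation never generates zeros, i.e. the support starts at 1 (trials up to and including the first success). A minimal sketch with member names taken from these doc comments:

using System;
using MathNet.Numerics.Distributions;

class GeometricSketch
{
    static void Main()
    {
        var geo = new Geometric(0.25);               // p = probability of success on each trial

        double pFirstTry = geo.Probability(1);       // 0.25; k = 0 has zero probability here
        double pWithinFour = geo.CumulativeDistribution(4.0);
        int trials = geo.Sample();

        Console.WriteLine($"P(X=1)={pFirstTry}, P(X<=4)={pWithinFour}, sample={trials}, min={geo.Minimum}");
    }
}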
+ + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). 
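[Editor's sketch] A sketch of the Hypergeometric parameterization documented above (population N, successes K, draws n without replacement), using a deck-of-cards example; the constructor argument order is assumed from the order in which these doc comments list the parameters:

using System;
using MathNet.Numerics.Distributions;

class HypergeometricSketch
{
    static void Main()
    {
        // N = 52 cards, K = 13 hearts, n = 5 cards drawn without replacement.
        var hearts = new Hypergeometric(52, 13, 5);

        double pNoHearts = hearts.Probability(0);
        double pAtMostOne = hearts.CumulativeDistribution(1.0);
        int drawn = hearts.Sample();

        Console.WriteLine($"P(X=0)={pNoHearts}, P(X<=1)={pAtMostOne}, sample={drawn}");
    }
}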
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). 
+ The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. 
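[Editor's sketch] The interface doc comments above (continuous, discrete, and base probability distribution) suggest that any distribution can be consumed polymorphically through Density, CumulativeDistribution, Sample and the moment properties. A sketch under the assumption that the interface is named IContinuousDistribution and exposes those members, as the comments describe:

using System;
using MathNet.Numerics.Distributions;

class InterfaceSketch
{
    // Works for any continuous distribution: Exponential, InverseGamma, Gamma, ...
    static void Describe(IContinuousDistribution d)
    {
        Console.WriteLine($"mean={d.Mean}, stddev={d.StdDev}, density(1)={d.Density(1.0)}, draw={d.Sample()}");
    }

    static void Main()
    {
        Describe(new Exponential(0.5));
        Describe(new InverseGamma(3.0, 2.0)); // shape alpha = 3, scale beta = 2, as documented above
    }
}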
+ + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. 
+ Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. 
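[Editor's sketch] The Laplace summary above gives the PDF p(x) = exp(-|x - mean| / scale) / (2 * scale). A short sketch that checks the library density against that formula directly; the constructor order (location, then scale) is taken from these doc comments:

using System;
using MathNet.Numerics.Distributions;

class LaplaceSketch
{
    static void Main()
    {
        const double mean = 1.0, scale = 2.0;
        var laplace = new Laplace(mean, scale);

        double x = 2.5;
        double fromLibrary = laplace.Density(x);
        double byHand = Math.Exp(-Math.Abs(x - mean) / scale) / (2.0 * scale); // the PDF quoted above

        Console.WriteLine($"library={fromLibrary}, formula={byHand}");
    }
}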
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. 
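[Editor's sketch] The log-normal comments above describe three ways to obtain a distribution: directly from the log-scale mu and shape sigma, from a desired mean and variance, and by maximum-likelihood estimation from data (likened to MATLAB's lognfit). A sketch assuming those factories are named WithMeanVariance and Estimate as implied by the comments:

using System;
using System.Linq;
using MathNet.Numerics.Distributions;

class LogNormalSketch
{
    static void Main()
    {
        var direct = new LogNormal(0.0, 0.25);                 // mu and sigma of the underlying normal
        var byMoments = LogNormal.WithMeanVariance(10.0, 4.0); // mean and variance of the log-normal itself

        double[] data = direct.Samples().Take(1000).ToArray();
        var fitted = LogNormal.Estimate(data);                 // maximum-likelihood fit from the samples

        Console.WriteLine($"byMoments mean={byMoments.Mean}, fitted mu={fitted.Mu}, sigma={fitted.Sigma}");
    }
}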
+    <!-- Continuous univariate Log-Normal distribution: log-scale μ, shape σ ≥ 0
+         (Wikipedia - Log-Normal distribution). Documents the constructors, the factory methods that
+         build the distribution from (μ, σ) or from a desired mean and variance, maximum-likelihood
+         Estimate (MATLAB lognfit), parameter validation, the Mu/Sigma/RandomSource properties, the
+         moment properties, Density/DensityLn/CumulativeDistribution/InverseCumulativeDistribution
+         (MATLAB lognpdf/logncdf/logninv), and the instance and static Sample/Samples/array-fill
+         members based on the Box-Muller algorithm.
+
+         Multivariate matrix-valued Normal distribution: mean matrix M (d-by-m), row covariance V
+         (d-by-d) and column covariance K (m-by-m) (Wikipedia - MatrixNormal distribution). Documents
+         the constructors (which reject mismatched dimensions), parameter validation, the
+         Mean/RowCovariance/ColumnCovariance/RandomSource properties, Density, and the instance and
+         static samplers, including a helper that samples a vector-valued normal variable. -->
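// Usage sketch (illustrative, not from this repository): constructing the Log-Normal distribution
// documented above either directly from (μ, σ) or from a desired mean/variance, and estimating its
// parameters from data. The sample array is an assumed placeholder.
using System;
using MathNet.Numerics.Distributions;

static class LogNormalUsageSketch
{
    public static void Run()
    {
        var direct    = new LogNormal(0.0, 0.25);             // log-scale μ, shape σ
        var byMoments = LogNormal.WithMeanVariance(2.0, 0.5);  // same family, specified via mean/variance

        double median = direct.Median;                         // exp(μ)
        double q95    = LogNormal.InvCDF(0.0, 0.25, 0.95);     // static quantile (MATLAB logninv)

        double[] data = { 0.9, 1.1, 1.4, 0.8, 1.0, 1.3 };      // assumed sample data
        LogNormal fitted = LogNormal.Estimate(data);           // maximum-likelihood fit (MATLAB lognfit)

        Console.WriteLine("{0} {1} {2} {3}", median, q95, byMoments.Mean, fitted.Mu);
    }
}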
+    <!-- Multivariate Multinomial distribution parameterized by a vector of non-negative ratios
+         (which need not sum to one, since exact normalization is not always possible in floating
+         point) and the number of trials N (Wikipedia - Multinomial distribution). Documents the
+         constructors (ratio array, ratio array plus random source, histogram), ToString, parameter
+         validation, the P/N/RandomSource properties, the mean, variance and skewness vectors,
+         Probability/ProbabilityLn for count vectors x1..xk, and the instance and static samplers
+         that return the counts for each category.
+
+         Discrete univariate Negative Binomial distribution: required successes r ≥ 0 and success
+         probability 0 ≤ p ≤ 1; for integer r it can be read as the number of failures before the
+         r-th success (Wikipedia - NegativeBinomial distribution). Documents the constructors,
+         parameter validation, the R/P/RandomSource properties, the moment properties,
+         Probability/ProbabilityLn/CumulativeDistribution with matching static PMF/PMFLn/CDF
+         overloads, and the instance and static Sample/Samples/array-fill members. -->
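// Usage sketch (illustrative): the Multinomial and Negative Binomial members documented above.
// The ratio vector and parameter values are assumptions for the example only.
using System;
using MathNet.Numerics.Distributions;

static class DiscreteUsageSketch
{
    public static void Run()
    {
        // Ratios need not sum to one; the class normalizes them internally.
        var multinomial = new Multinomial(new[] { 2.0, 1.0, 1.0 }, 10);
        int[] counts = multinomial.Sample();                 // counts per category, summing to 10

        var negBin = new NegativeBinomial(3.0, 0.4);         // r = 3 successes, p = 0.4
        double pmf = negBin.Probability(5);                  // P(X = 5 failures before the 3rd success)
        double cdf = negBin.CumulativeDistribution(5.0);     // P(X ≤ 5)

        Console.WriteLine("{0} {1} {2}", string.Join(",", counts), pmf, cdf);
    }
}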
+    <!-- Continuous univariate Normal (Gaussian) distribution: mean μ, standard deviation σ ≥ 0
+         (Wikipedia - Normal distribution). Documents the constructors (standard normal by default),
+         the factory methods that build the distribution from mean and standard deviation, variance
+         or precision, maximum-likelihood Estimate (MATLAB normfit), parameter validation, the
+         Mean/StdDev/Variance/Precision/RandomSource properties, the moment properties,
+         Density/DensityLn/CumulativeDistribution/InverseCumulativeDistribution
+         (MATLAB normpdf/normcdf/norminv), and the instance and static Sample/Samples/array-fill
+         members based on the Box-Muller algorithm.
+
+         MeanPrecisionPair: a small struct holding a (mean, precision) pair; it is the value type
+         over which the Normal-Gamma distribution below is defined. -->
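// Usage sketch (illustrative): the Normal members documented above, including the factory that
// builds the distribution from mean and variance and the static PDF/CDF/InvCDF overloads.
// Parameter values are assumptions for the example.
using System;
using MathNet.Numerics.Distributions;

static class NormalUsageSketch
{
    public static void Run()
    {
        var standard = new Normal();                         // μ = 0, σ = 1
        var fromVar  = Normal.WithMeanVariance(10.0, 4.0);   // μ = 10, σ² = 4

        double density  = standard.Density(1.0);
        double quantile = standard.InverseCumulativeDistribution(0.975); // ≈ 1.96

        // Static overloads follow the (μ, σ, x) convention.
        double cdf = Normal.CDF(10.0, 2.0, 12.0);            // P(X ≤ 12) for N(10, 2²)
        double s   = Normal.Sample(new Random(1), 0.0, 1.0); // Box-Muller based sampler

        Console.WriteLine("{0} {1} {2} {3} {4}", density, quantile, cdf, s, fromVar.StdDev);
    }
}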
+    <!-- Multivariate Normal-Gamma distribution NG(μ, τ | mean location, mean scale, precision shape,
+         precision inverse scale), the conjugate prior over the mean and precision of the Normal
+         distribution (Wikipedia - Normal-Gamma distribution); degenerate cases with known mean and/or
+         known precision are encoded via infinite scale parameters. Documents the constructors,
+         parameter validation, the parameter and RandomSource properties, the marginal distributions
+         of the mean and of the precision, the mean and variance, Density/DensityLn evaluated at a
+         mean/precision pair, and the instance and static samplers.
+
+         Continuous univariate Pareto distribution: scale xm > 0, shape α > 0 (Wikipedia - Pareto
+         distribution). Documents the constructors (negative parameters are rejected), parameter
+         validation, the Scale/Shape/RandomSource properties, the moment properties,
+         Density/DensityLn/CumulativeDistribution/InverseCumulativeDistribution with the static
+         PDF/PDFLn/CDF/InvCDF overloads, and the instance and static Sample/Samples/array-fill
+         members. -->
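// Usage sketch (illustrative): evaluating and sampling the Pareto distribution documented above.
// Parameter values are assumptions for the example.
using System;
using MathNet.Numerics.Distributions;

static class ParetoUsageSketch
{
    public static void Run()
    {
        var pareto = new Pareto(1.0, 3.0);                   // scale xm = 1, shape α = 3
        double density = pareto.Density(2.0);
        double cdf     = pareto.CumulativeDistribution(2.0); // P(X ≤ 2) = 1 - (xm/x)^α
        double draw    = pareto.Sample();

        double s = Pareto.Sample(new Random(7), 1.0, 3.0);   // static overload: (rng, scale, shape)
        Console.WriteLine("{0} {1} {2} {3}", density, cdf, draw, s);
    }
}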
+    <!-- Discrete univariate Poisson distribution with rate λ > 0 and probability mass
+         f(x) = exp(-λ) λ^x / x! (Wikipedia - Poisson distribution). Documents the constructors
+         (λ ≤ 0 is rejected), parameter validation, the Lambda/RandomSource properties, the moment
+         properties (entropy and median are approximations), Probability/ProbabilityLn/
+         CumulativeDistribution with static PMF/PMFLn/CDF overloads, and the instance and static
+         Sample/Samples/array-fill members; the internal generators use Knuth's method and
+         "Rejection method PA" from A. C. Atkinson, "The Computer Generation of Poisson Random
+         Variables", Applied Statistics 28(1), 1979, pp. 29-35 (algorithm on p. 32). -->
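// Usage sketch (illustrative): the Poisson members documented above. λ = 4.0 is an assumed value.
using System;
using MathNet.Numerics.Distributions;

static class PoissonUsageSketch
{
    public static void Run()
    {
        var poisson = new Poisson(4.0);                      // rate λ > 0
        double pmf = poisson.Probability(2);                 // P(X = 2) = e^-4 · 4² / 2!
        double cdf = poisson.CumulativeDistribution(2.0);    // P(X ≤ 2)
        int draw   = poisson.Sample();                       // Knuth / rejection-PA based sampler

        int s = Poisson.Sample(new Random(3), 4.0);          // static overload with explicit RNG
        Console.WriteLine("{0} {1} {2} {3}", pmf, cdf, draw, s);
    }
}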
+    <!-- Continuous univariate Rayleigh distribution with scale σ > 0, e.g. the distribution of wind
+         speed when the two components of the velocity vector are uncorrelated, equal-variance
+         normals (Wikipedia - Rayleigh distribution). Documents the constructors (a negative scale is
+         rejected), parameter validation, the Scale/RandomSource properties, the moment properties,
+         Density/DensityLn/CumulativeDistribution/InverseCumulativeDistribution with the static
+         PDF/PDFLn/CDF/InvCDF overloads, and the instance and static Sample/Samples/array-fill
+         members. -->
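// Usage sketch (illustrative): the Rayleigh members documented above, with σ = 2.0 assumed.
using System;
using MathNet.Numerics.Distributions;

static class RayleighUsageSketch
{
    public static void Run()
    {
        var rayleigh = new Rayleigh(2.0);                    // scale σ > 0
        double density = rayleigh.Density(1.5);
        double median  = rayleigh.Median;                    // σ · sqrt(2 ln 2)
        double q90     = rayleigh.InverseCumulativeDistribution(0.9);
        double draw    = rayleigh.Sample();
        Console.WriteLine("{0} {1} {2} {3}", density, median, q90, draw);
    }
}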
+    <!-- Continuous univariate Stable distribution: stability 0 < α ≤ 2, skewness -1 ≤ β ≤ 1,
+         scale c > 0, location μ (Wikipedia - Stable distribution); a linear combination of two
+         independent copies has the same distribution up to location and scale. Documents the
+         constructors, parameter validation, the Alpha/Beta/Scale/Location/RandomSource properties,
+         the moment properties (entropy is not supported, skewness requires α = 2, mode and median
+         require β = 0), Density/DensityLn and CumulativeDistribution (supported only in the normal
+         α = 2, Cauchy α = 1, β = 0 and Lévy α = 0.5, β = 1 special cases), and the instance and
+         static Sample/Samples/array-fill members. -->
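// Usage sketch (illustrative): the Stable members documented above. Density/CDF only have closed
// forms in the special cases noted above, so the Cauchy case (α = 1, β = 0) is used for the
// density here; sampling works for any valid parameter set. Values are assumptions.
using System;
using MathNet.Numerics.Distributions;

static class StableUsageSketch
{
    public static void Run()
    {
        var cauchyLike = new Stable(1.0, 0.0, 1.0, 0.0);     // α, β, scale c, location μ
        double density = cauchyLike.Density(0.5);            // supported: α = 1, β = 0 (Cauchy)
        double draw    = cauchyLike.Sample();

        var heavyTailed = new Stable(1.5, 0.5, 1.0, 0.0);    // general case: sampling only
        double s = heavyTailed.Sample();
        Console.WriteLine("{0} {1} {2}", density, draw, s);
    }
}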
+    <!-- Continuous univariate Student's t-distribution in the generalized location/scale form
+         p(x | μ, σ, ν) = Γ((ν+1)/2) · (1 + (x-μ)²/(σ²ν))^(-(ν+1)/2) / (Γ(ν/2) · sqrt(νπ) · σ),
+         with location μ, scale σ > 0 and degrees of freedom ν > 0 (Wikipedia - Student's
+         t-distribution; Gelman et al., "Bayesian Data Analysis"); parameter checks can be disabled
+         via Control.CheckDistributionParameters. Documents the constructors (standard t with ν = 1
+         by default), parameter validation, the Location/Scale/DegreesOfFreedom/RandomSource
+         properties, the moment properties, Density/DensityLn/CumulativeDistribution, an
+         InverseCumulativeDistribution that is flagged as slow and unreliable because it is not an
+         explicit implementation, and the instance and static samplers based on method 2, section 5,
+         chapter 9 of L. Devroye, "Non-Uniform Random Variate Generation". -->
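// Usage sketch (illustrative): the Student's t members documented above, in the generalized
// location/scale parameterization. μ = 0, σ = 1, ν = 5 are assumed values.
using System;
using MathNet.Numerics.Distributions;

static class StudentTUsageSketch
{
    public static void Run()
    {
        var t = new StudentT(0.0, 1.0, 5.0);                 // location μ, scale σ, degrees of freedom ν
        double density = t.Density(1.0);
        double cdf     = t.CumulativeDistribution(2.0);      // P(X ≤ 2)

        // The inverse CDF is documented as slow/unreliable (no explicit implementation),
        // so treat quantiles obtained from it accordingly.
        double q95 = t.InverseCumulativeDistribution(0.95);

        double draw = StudentT.Sample(new Random(5), 0.0, 1.0, 5.0); // Devroye-based sampler
        Console.WriteLine("{0} {1} {2} {3}", density, cdf, q95, draw);
    }
}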
+ + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. 
+ + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. 
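Before the Wishart members that follow, a short sketch of the Triangular and Weibull APIs documented above, assuming a MathNet.Numerics package reference; constructor arguments and evaluation points are illustrative.

    using System;
    using MathNet.Numerics.Distributions;

    // Triangular distribution with lower bound 0, upper bound 10 and mode 3.
    var tri = new Triangular(0.0, 10.0, 3.0);
    double triPdf = tri.Density(2.5);                // density at x = 2.5
    double triCdf = tri.CumulativeDistribution(2.5); // P(X ≤ 2.5)

    // Weibull distribution with shape k = 1.5 and scale λ = 2.0.
    var wei = new Weibull(1.5, 2.0);
    double weiMean   = wei.Mean;
    double weiSample = wei.Sample();

    Console.WriteLine($"{triPdf:F4} {triCdf:F4} {weiMean:F4} {weiSample:F4}");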
+ + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. + + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. 
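The Wishart members above operate on matrices from MathNet.Numerics.LinearAlgebra; before the Zipf parameter entries continue, here is a short sketch assuming the Wishart(degreesOfFreedom, scale) constructor and a purely illustrative identity scale matrix.

    using System;
    using MathNet.Numerics.Distributions;
    using MathNet.Numerics.LinearAlgebra;

    // Wishart with 5 degrees of freedom and a 3x3 identity scale matrix.
    var scale = Matrix<double>.Build.DenseIdentity(3);
    var wishart = new Wishart(5.0, scale);

    Matrix<double> draw = wishart.Sample();   // one positive-definite random matrix
    double density = wishart.Density(draw);   // evaluate the PDF at a matrix argument of the same size

    Console.WriteLine(draw.ToMatrixString());
    Console.WriteLine(density);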
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. 
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. 
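Before the remaining gcd/lcm entries, a combined sketch of the Zipf distribution and the integer number-theory helpers documented in this stretch; the class and method names (Zipf, Euclid.GreatestCommonDivisor, Euclid.Modulus and so on) are assumed from the current MathNet.Numerics API.

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Distributions;

    // Zipf with exponent s = 2.0 over n = 100 elements.
    var zipf = new Zipf(2.0, 100);
    double pmf = zipf.Probability(3);   // P(X = 3)
    int rank = zipf.Sample();

    // Integer number-theory helpers.
    long gcd  = Euclid.GreatestCommonDivisor(45, 18);  // 9
    long lcm  = Euclid.LeastCommonMultiple(4, 6);      // 12
    bool pow2 = Euclid.IsPowerOfTwo(64);               // true
    int  mod  = Euclid.Modulus(-5, 3);                 // 1: the canonical modulus takes the sign of the divisor

    Console.WriteLine($"{pmf:F4} {rank} {gcd} {lcm} {pow2} {mod}");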
+ Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend to use them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occured calling native provider function. + + + + + An error occured calling native provider function. + + + + + Native provider was unable to allocate sufficent memory. + + + + + Native provider failed LU inversion do to a singular U matrix. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return 0) + and then dividing the total by the number of gain periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. (The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). 
+ + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. 
+ A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. 
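The root-finding and least-squares entries above (together with the goodness-of-fit helpers documented further below) map onto the FindRoots, Fit, Evaluate and GoodnessOfFit classes; the following sketch assumes those MathNet.Numerics entry points and uses made-up sample data.

    using System;
    using System.Linq;
    using MathNet.Numerics;

    // Bracketed root of f(x) = x^2 - 2 on [0, 2].
    double root = FindRoots.OfFunction(x => x * x - 2.0, 0.0, 2.0);

    // Least-squares line y = a + b*x through a few noisy points.
    double[] xs = { 0.0, 1.0, 2.0, 3.0 };
    double[] ys = { 1.1, 2.9, 5.2, 6.8 };
    var line = Fit.Line(xs, ys);
    double intercept = line.Item1, slope = line.Item2;

    // Second-order polynomial fit, coefficients ascending by exponent (compatible with Evaluate.Polynomial).
    double[] p = Fit.Polynomial(xs, ys, 2);
    double yAt2 = Evaluate.Polynomial(2.0, p);

    // Coefficient of determination of the fitted line against the observations.
    double rSquared = GoodnessOfFit.RSquared(xs.Select(x => intercept + slope * x), ys);

    Console.WriteLine($"root={root:F6} a={intercept:F3} b={slope:F3} y(2)={yAt2:F3} R2={rSquared:F4}");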
+ + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. 
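A small sketch of the linearly and logarithmically spaced generators described above, assuming the Generate class and its MATLAB-style length-first conventions.

    using System;
    using MathNet.Numerics;

    // 5 points from 0 to 1 inclusive (linspace with the length as first argument).
    double[] lin = Generate.LinearSpaced(5, 0.0, 1.0);      // 0, 0.25, 0.5, 0.75, 1

    // 4 points between the decades 10^0 and 10^3 (logspace).
    double[] log = Generate.LogSpaced(4, 0.0, 3.0);         // 1, 10, 100, 1000

    // Colon-style range 0:0.5:2; the start is always included, the stop only because it lines up with the step.
    double[] range = Generate.LinearRange(0.0, 0.5, 2.0);   // 0, 0.5, 1, 1.5, 2

    Console.WriteLine(string.Join(", ", range));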
+ + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. 
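The periodic-signal generators documented above can be sketched roughly as follows; the method names (Sinusoidal, Square, Sawtooth) and their optional parameters are assumed from the Generate class and may differ slightly between Math.NET versions.

    using System;
    using System.Linq;
    using MathNet.Numerics;

    // 64 samples of a sine wave: 32 samples per time unit, 2 periods per time unit, amplitude 1.
    double[] sine = Generate.Sinusoidal(64, 32.0, 2.0, 1.0);

    // Square wave starting with 3 high samples followed by 5 low samples, emitting 1.0 and 0.0.
    double[] square = Generate.Square(32, 3, 5, 0.0, 1.0);

    // Sawtooth rising from 0 to 1 over a period of 8 samples.
    double[] saw = Generate.Sawtooth(32, 8, 0.0, 1.0);

    Console.WriteLine(string.Join(", ", sine.Take(8).Select(v => v.ToString("F3"))));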
+ + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. 
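For the random-sample generators above, a minimal sketch; Generate.Uniform and Generate.Random are assumed entry points, with Generate.Random accepting any distribution object from MathNet.Numerics.Distributions.

    using System;
    using System.Linq;
    using MathNet.Numerics;
    using MathNet.Numerics.Distributions;

    // 10 quick uniform [0,1) samples (fast, with the reduced randomness guarantees noted above).
    double[] u = Generate.Uniform(10);

    // 10 samples drawn from an explicit distribution object (standard normal here).
    double[] g = Generate.Random(10, new Normal(0.0, 1.0));

    Console.WriteLine($"{u.Average():F3} {g.Average():F3}");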
+ + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + The parsed double number using the current culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + The parsed float number using the current culture information. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. 
+ + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. 
+ Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). [= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). 
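Before the Hartley transform entries, a sketch of the forward/inverse FFT API documented above; the Fourier class, FourierOptions.Matlab and FrequencyScale are assumed from MathNet.Numerics.IntegralTransforms.

    using System;
    using System.Numerics;
    using MathNet.Numerics.IntegralTransforms;

    // Arbitrary-length complex sample vector (length 6 is not a power of two, so the Bluestein path is used).
    var samples = new Complex[6];
    for (int i = 0; i < samples.Length; i++)
        samples[i] = new Complex(Math.Sin(2 * Math.PI * i / samples.Length), 0.0);

    // In-place forward FFT, then back; the Matlab convention scales by 1/N on the inverse only.
    Fourier.Forward(samples, FourierOptions.Matlab);
    Fourier.Inverse(samples, FourierOptions.Matlab);

    // Frequency of each bin for a given sample rate: DC, positive frequencies, then the wrapped negative ones.
    double[] freq = Fourier.FrequencyScale(samples.Length, 6.0);
    Console.WriteLine(string.Join(", ", freq));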
+ + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Approximation of the finite integral in the given interval. + + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. 
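A sketch of the quadrature entry points documented above, plus the composite Newton-Cotes rules documented a little further below; Integrate.OnClosedInterval, GaussLegendreRule, NewtonCotesTrapeziumRule and SimpsonRule are assumed from MathNet.Numerics.

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Integration;

    Func<double, double> f = x => Math.Exp(-x);

    // ∫ exp(-x) dx over [0, 10] via the default double-exponential transformation.
    double de = Integrate.OnClosedInterval(f, 0.0, 10.0);

    // The same integral with a 32nd-order Gauss-Legendre rule (precomputed abscissas/weights).
    double gl = GaussLegendreRule.Integrate(f, 0.0, 10.0, 32);

    // Composite Newton-Cotes alternatives; Simpson's rule needs an even number of partitions.
    double trap = NewtonCotesTrapeziumRule.IntegrateComposite(f, 0.0, 10.0, 1000);
    double simp = SimpsonRule.IntegrateComposite(f, 0.0, 10.0, 1000);

    Console.WriteLine($"{de:F10} {gl:F10} {trap:F10} {simp:F10}");   // all ≈ 1 - exp(-10)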
+ + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. + + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. 
+ + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. 
+ First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. + Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. 
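For the Newton-Cotes trapezium and Simpson rules described above, the composite variants take the number of subdivision partitions explicitly. A minimal sketch, assuming MathNet.Numerics 3.x; the partition counts are illustrative, and the composite Simpson rule expects an even count.

```csharp
using System;
using MathNet.Numerics.Integration;

class NewtonCotesDemo
{
    static void Main()
    {
        // Integrand with known antiderivative: integral over [0,1] is pi/4.
        Func<double, double> f = x => 1.0 / (1.0 + x * x);

        // Composite trapezium rule with 1000 partitions.
        double trapezium = NewtonCotesTrapeziumRule.IntegrateComposite(f, 0.0, 1.0, 1000);

        // Composite Simpson rule; the partition count must be even.
        double simpson = SimpsonRule.IntegrateComposite(f, 0.0, 1.0, 100);

        Console.WriteLine($"trapezium={trapezium}, simpson={simpson}, exact={Math.PI / 4}");
    }
}
```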
+ + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. 
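The interpolation factory methods listed above all return a scheme that can afterwards be evaluated at arbitrary points. A minimal sketch, assuming the Interpolate.Linear and Interpolate.CubicSpline factory names from MathNet.Numerics 3.x; the sample data is illustrative.

```csharp
using System;
using MathNet.Numerics;
using MathNet.Numerics.Interpolation;

class InterpolationFactoryDemo
{
    static void Main()
    {
        double[] t = { 0.0, 1.0, 2.0, 4.0, 5.0 };
        double[] x = { 1.0, 2.0, 0.5, 3.0, 2.5 };

        // Piecewise linear interpolation on arbitrary points.
        IInterpolation linear = Interpolate.Linear(t, x);

        // Natural cubic spline (zero second derivatives at the boundaries).
        IInterpolation spline = Interpolate.CubicSpline(t, x);

        Console.WriteLine($"linear(1.5)={linear.Interpolate(1.5)}, spline(1.5)={spline.Interpolate(1.5)}");
    }
}
```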
+ + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. 
+ + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. 
+ Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. 
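Because cubic splines support both differentiation and integration, the sorted creator named above (CubicSpline.InterpolateNaturalSorted) yields a single object from which values, derivatives and definite integrals can all be taken. A minimal sketch with illustrative sample data:

```csharp
using System;
using MathNet.Numerics.Interpolation;

class CubicSplineDemo
{
    static void Main()
    {
        // Sample points must already be sorted ascending for the *Sorted creators.
        double[] t = { 0.0, 1.0, 2.0, 3.0, 4.0 };
        double[] x = { 0.0, 0.8, 0.9, 0.1, -0.8 };   // roughly sin(t)

        CubicSpline spline = CubicSpline.InterpolateNaturalSorted(t, x);

        double value = spline.Interpolate(2.5);      // interpolated x(2.5)
        double slope = spline.Differentiate(2.5);    // first derivative at t = 2.5
        double curvature = spline.Differentiate2(2.5);
        double area = spline.Integrate(0.0, 4.0);    // definite integral over [0, 4]

        Console.WriteLine($"{value} {slope} {curvature} {area}");
    }
}
```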
+ + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. + + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). 
+ + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Left and right boundary conditions. + + + + + Natural Boundary (Zero second derivative). + + + + + Parabolically Terminated boundary. + + + + + Fixed first derivative at the boundary. + + + + + Fixed second derivative at the boundary. + + + + + A step function where the start of each segment is included, and the last segment is open-ended. + Segment i is [x_i, x_i+1) for i < N, or [x_i, infinity] for i = N. + The domain of the function is all real numbers, such that y = 0 where x <. + + Supports both differentiation and integration. + + + Sample points (N), sorted ascending + Samples values (N) of each segment starting at the corresponding sample point. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t. + + + + + Wraps an interpolation with a transformation of the interpolated values. + + Neither differentiation nor integration is supported. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. 
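The step interpolation described above treats each segment as left-closed and returns zero before the first sample point. A minimal sketch using the StepInterpolation.InterpolateSorted creator named earlier in this documentation; the data is illustrative.

```csharp
using System;
using MathNet.Numerics.Interpolation;

class StepInterpolationDemo
{
    static void Main()
    {
        // Left-closed step function: segment i is [t_i, t_{i+1}), and 0 before t_0.
        double[] t = { 0.0, 1.0, 2.0, 3.0 };
        double[] x = { 10.0, 20.0, 15.0, 5.0 };

        var step = StepInterpolation.InterpolateSorted(t, x);

        Console.WriteLine(step.Interpolate(-0.5));   // 0: before the first sample point
        Console.WriteLine(step.Interpolate(1.5));    // 20: value of the segment starting at t = 1
        Console.WriteLine(step.Integrate(0.0, 3.0)); // piecewise-constant area: 10 + 20 + 15 = 45
    }
}
```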
+ + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
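The DenseMatrix creation helpers documented above differ mainly in whether they copy the input or bind directly to the caller's storage. A minimal sketch, assuming the MathNet.Numerics 3.x creator names (OfArray, OfColumnArrays) and the plain row/column constructor; the values are illustrative.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixCreationDemo
{
    static void Main()
    {
        // Copy of a 2-D array; the matrix gets its own column-major storage block.
        var a = DenseMatrix.OfArray(new double[,]
        {
            { 4.0, 1.0 },
            { 1.0, 3.0 }
        });

        // 3x2 matrix initialized to zero (zero-length matrices are not supported).
        var zeros = new DenseMatrix(3, 2);

        // Built from column arrays; each inner array becomes one column.
        var cols = DenseMatrix.OfColumnArrays(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 });

        Console.WriteLine(a);
        Console.WriteLine(zeros);
        Console.WriteLine(cols);
    }
}
```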
+ + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. 
+ The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. 
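The arithmetic, pointwise and norm members documented in this part are also exposed through operators on the matrix types. A minimal sketch, assuming MathNet.Numerics 3.x; the expected results for the small illustrative matrices are worked out in the comments.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixArithmeticDemo
{
    static void Main()
    {
        var a = DenseMatrix.OfArray(new double[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });
        var b = DenseMatrix.OfArray(new double[,] { { 0.5, 0.0 }, { 0.0, 0.5 } });

        var sum = a + b;                       // element-wise addition
        var product = a * b;                   // matrix product
        var hadamard = a.PointwiseMultiply(b); // element-wise product

        Console.WriteLine(a.Trace());          // 1 + 4 = 5 (square matrices only)
        Console.WriteLine(a.L1Norm());         // maximum absolute column sum = 6
        Console.WriteLine(a.InfinityNorm());   // maximum absolute row sum = 7
        Console.WriteLine(a.FrobeniusNorm());  // sqrt(1 + 4 + 9 + 16)
        Console.WriteLine(sum);
        Console.WriteLine(product);
        Console.WriteLine(hadamard);
    }
}
```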
It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. 
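DenseVector follows the same pattern: constructors that bind directly to a raw array versus creators that copy, plus element-wise operators, dot products and norms as documented above. A minimal sketch with illustrative data:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseVectorDemo
{
    static void Main()
    {
        // Binds directly to the raw array: changes to 'data' and 'v' affect each other.
        double[] data = { 1.0, -2.0, 3.0 };
        var v = new DenseVector(data);

        // Independent vector with its own storage, filled by an init function: (1, 2, 3).
        var w = DenseVector.Create(3, i => i + 1.0);

        var sum = v + w;                 // element-wise addition
        double dot = v.DotProduct(w);    // 1*1 + (-2)*2 + 3*3 = 6
        double norm = v.L2Norm();        // sqrt(1 + 4 + 9)

        Console.WriteLine($"{sum} dot={dot} |v|={norm} maxIndex={v.MaximumIndex()}");
    }
}
```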
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use, + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a double dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. 
+ Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. 
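DiagonalMatrix stores only the diagonal, which is why off-diagonal writes are rejected (except zero or NaN, which leave the matrix unchanged) and why determinant and inverse are cheap, as documented above. A minimal sketch, assuming the rows/columns/diagonal-array constructor described in this documentation; the values are illustrative.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DiagonalMatrixDemo
{
    static void Main()
    {
        // Binds directly to the diagonal storage array (3x3 with diagonal 2, 4, 8).
        var d = new DiagonalMatrix(3, 3, new[] { 2.0, 4.0, 8.0 });

        Console.WriteLine(d.Determinant());   // product of the diagonal: 64
        Console.WriteLine(d.Inverse());       // diagonal 0.5, 0.25, 0.125
        Console.WriteLine(d.Diagonal());      // the diagonal as a vector

        d[0, 0] = 3.0;       // allowed: on the diagonal
        d[0, 1] = 0.0;       // allowed: zero causes no change to a diagonal matrix
        // d[0, 1] = 1.0;    // would throw: non-zero off-diagonal writes are rejected
    }
}
```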
+ + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. 
+ The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. 
+ + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + Matrix V is encoded in the property EigenVectors in the way that: + - column corresponding to real eigenvalue represents real eigenvector, + - columns corresponding to the pair of complex conjugate eigenvalues + lambda[i] and lambda[i+1] encode real and imaginary parts of eigenvectors. + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. 
+ If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. 
+ + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
+ + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + double version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . 
+ If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. 
+ The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the proper preconditioner.
+ The BiCGStab algorithm was taken from:
+ "Templates for the Solution of Linear Systems: Building Blocks for Iterative Methods",
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, June M. Donato, Jack Dongarra,
+ Victor Eijkhout, Roldan Pozo, Charles Romine and Henk van der Vorst,
+ http://www.netlib.org/templates/Templates.html
+ The algorithm is described in Chapter 2, section 2.3.8, page 27.
+ The example code below provides an indication of the possible use of the solver.
+
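The example block that the original XML documentation refers to is not reproduced in this hunk. As a rough usage sketch only, and not the library's own sample, the snippet below shows how the BiCgStab solver is typically driven through the MathNet.Numerics 3.x API that this solution references; the SolveIterative overload, the stop-criterion type names and the solver class name are assumed to match that release.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class BiCgStabSketch
    {
        static void Main()
        {
            // Small test system stored sparsely; BiCgStab also accepts non-symmetric A.
            Matrix<double> a = SparseMatrix.OfArray(new double[,]
            {
                { 4, 1, 0 },
                { 1, 4, 1 },
                { 0, 1, 4 }
            });
            Vector<double> b = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });

            // Stop after 1000 iterations or once the residual norm drops below 1e-10.
            var monitor = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            Vector<double> x = a.SolveIterative(b, new BiCgStab(), monitor);
            System.Console.WriteLine(x);
        }
    }

For small dense systems a direct factorization, for example a.LU().Solve(b), is usually the simpler choice; the iterative solvers documented here pay off for large sparse systems.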
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ "Faster PDE-based simulations using robust composite linear solvers",
+ S. Bhowmick, P. Raghavan, L. McInnes and B. Norris,
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387.
+
+ Note that if an iterator is passed to this solver it will be used for all the sub-solvers.
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the proper preconditioner.
+ The GPBiCG algorithm was taken from:
+ "GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with efficiency and robustness",
+ S. Fujino,
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107-117.
+
+ The example code below provides an indication of the possible use of the solver.
+
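The original example for this solver is likewise not carried in the hunk. Here is a minimal sketch of the switching behaviour described above, reusing the matrix a, right-hand side b and monitor from the BiCgStab sketch earlier; the property names NumberOfBiCgStabSteps and NumberOfGpBiCgSteps are assumptions based on the documented getters and setters.

    // Alternate between the two schemes: 10 BiCgStab-style steps, then 25
    // GPBiCG-style steps, then repeat until the monitor signals convergence.
    var gpBiCg = new GpBiCg
    {
        NumberOfBiCgStabSteps = 10,
        NumberOfGpBiCgSteps = 25
    };
    Vector<double> x = a.SolveIterative(b, gpBiCg, monitor);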
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ "Iterative Methods for Sparse Linear Systems",
+ Yousef Saad.
+ The algorithm is described in Chapter 10, section 10.3.2, page 275.
+
+
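A preconditioner of this kind is not used on its own; it is handed to one of the iterative solvers above. A sketch, assuming the type name ILU0Preconditioner and a SolveIterative overload that accepts a preconditioner, with a, b and monitor as in the earlier sketches:

    // The solver initializes the preconditioner against A and then calls its
    // Approximate(...) step on every iteration.
    var ilu0 = new ILU0Preconditioner();
    Vector<double> x = a.SolveIterative(b, new BiCgStab(), monitor, ilu0);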
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ "ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner",
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA.
+ Published in: Lecture Notes in Computer Science, Volume 3046, 2004, pp. 20-28.
+ The algorithm is described in Section 2, page 22.
+
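The three tuning knobs described in the member documentation that follows (fill level, drop tolerance and pivot tolerance) are supplied when the preconditioner is constructed. A sketch under the assumption that the type is named ILUTPPreconditioner and takes those parameters in that order, again reusing a, b and monitor from the first sketch:

    // Fill level 200 (the documented default), drop entries below 1e-4,
    // and allow pivoting; the argument order is assumed, not verified.
    var ilutp = new ILUTPPreconditioner(200.0, 1e-4, 0.5);
    Vector<double> x = a.SolveIterative(b, new BiCgStab(), monitor, ilutp);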
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ "ML(k)BiCGSTAB: A BiCGSTAB Variant Based on Multiple Lanczos Starting Vectors",
+ Man-Chung Yeung and Tony F. Chan,
+ SIAM Journal on Scientific Computing, Volume 21, Number 4, pp. 1263-1290.
+ The example code below provides an indication of the possible use of the solver.
+
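In place of the missing example block, here is a sketch of adjusting the Krylov basis size; the class name MlkBiCgStab and a NumberOfStartingVectors property are assumptions based on the member documentation that follows. The matrix a, vector b and monitor are those of the first sketch.

    // More starting vectors enlarge the Lanczos basis at extra memory cost;
    // the count must stay below the number of unknowns in the system.
    var mlk = new MlkBiCgStab { NumberOfStartingVectors = 4 };
    Vector<double> x = a.SolveIterative(b, mlk, monitor);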
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ "Iterative Methods for Sparse Linear Systems",
+ Yousef Saad.
+ The algorithm is described in Chapter 7, section 7.4.3, page 219.
+ The example code below provides an indication of the possible use of the solver.
+
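A last sketch in place of the stripped example block: running TFQMR and then asking the monitor why iteration stopped. The Status property and the IterationStatus enumeration are assumed to behave as in MathNet.Numerics 3.x; a, b and monitor come from the first sketch.

    Vector<double> x = a.SolveIterative(b, new TFQMR(), monitor);
    if (monitor.Status != IterationStatus.Converged)
    {
        // Either the residual criterion was never met or the iteration cap was hit.
        System.Console.WriteLine("TFQMR stopped with status: " + monitor.Status);
    }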
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ [MathNet.Numerics docs, continued: Matrix<double> scalar Modulus and Remainder into a result matrix, the IsSymmetric check, and the +, -, unary -, and * operators (matrix-scalar, scalar-matrix, matrix-matrix, matrix-vector, vector-matrix) with their null and dimension exceptions.]
+ [MathNet.Numerics docs, continued: the double SparseVector type - sparse storage for very large, mostly-zero vectors (not thread safe), NonZerosCount, constructors from storage, a length, another vector, or (indexed) enumerables, the Create helpers, Add/Subtract/Negate/Multiply/DotProduct/Modulus/Remainder, and the vector operators; the docs warn that adding a non-zero scalar leaves the sparse vector fully populated and that a dense vector is the better choice there.]
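A short C# sketch of the SparseVector usage summarized above, assuming MathNet.Numerics 3.x; the sizes and values are made up, and the last lines illustrate the documented caveat about densification.

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class SparseVectorDemo
{
    static void Main()
    {
        // Only the assigned entries of these long, mostly-zero vectors are stored.
        var v = new SparseVector(100000);
        v[10] = 2.5;
        v[5000] = -1.0;

        var w = new SparseVector(100000);
        w[10] = 4.0;

        Console.WriteLine(v.NonZerosCount);   // 2
        Console.WriteLine(v.DotProduct(w));   // 10
        Console.WriteLine(v.L1Norm());        // 3.5

        // Adding a non-zero scalar would fill every cell of a sparse vector,
        // so copy to dense storage first if that is really what you need.
        Vector<double> dense = DenseVector.OfVector(v).Add(1.0);
        Console.WriteLine(dense.Count);       // 100000
    }
}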
+ [MathNet.Numerics docs, continued: the SparseVector % operator, absolute and plain minimum/maximum indices, Sum, the L1, infinity and p-norms, pointwise multiplication, and Parse/TryParse of vectors written as 'n', 'n,n,..', '(n,n,..)' or '[n,n,...]'.]
+ [MathNet.Numerics docs, continued: the abstract double Vector class - CoerceZero, Conjugate, Negate, scalar and vector Add/Subtract, scalar Multiply/Divide (and dividing a scalar by each element), the pointwise multiply, divide, power, modulus, remainder, exp and log operations, DotProduct and ConjugateDotProduct, scalar Modulus/Remainder in both dividend and divisor form, and the absolute minimum value and index.]
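A brief sketch of the element-wise vector operations listed above, again assuming MathNet.Numerics 3.x and restricted to members named in the documentation (PointwiseMultiply, PointwiseDivide, DotProduct).

using System;
using MathNet.Numerics.LinearAlgebra;

class PointwiseDemo
{
    static void Main()
    {
        var x = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
        var y = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

        // Element-wise product and quotient; the operands must have equal length.
        Console.WriteLine(x.PointwiseMultiply(y));   // (4, 10, 18)
        Console.WriteLine(y.PointwiseDivide(x));     // (4, 2.5, 2)

        // Dot product: the sum of x[i] * y[i] over all i.
        Console.WriteLine(x.DotProduct(y));          // 32
    }
}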
+ [MathNet.Numerics docs, continued: the remaining Vector members - absolute maximum value and index, Sum, the L1, L2, infinity and p-norms, maximum/minimum indices and p-norm Normalize - then the DenseMatrix type: column-major one-dimensional storage, cached row/column counts, the raw data accessor, and constructors from a storage instance, given dimensions, a raw column-major array (bound without copying), or copies of another matrix, a two-dimensional array or (indexed) enumerables.]
+ [MathNet.Numerics docs, continued: the DenseMatrix OfColumn*/OfRow*/OfDiagonal* factories that copy column, row or diagonal enumerables, arrays and vectors into new, independent matrices, plus the Create(value) and Create(init) helpers.]
+ [MathNet.Numerics docs, continued: DenseMatrix diagonal, identity and random creation, the induced L1 and infinity norms and the Frobenius norm, Negate, scalar and matrix Add/Subtract, scalar/vector/matrix Multiply with the TransposeAndMultiply and TransposeThisAndMultiply variants, scalar Divide, and PointwiseMultiply/PointwiseDivide/PointwisePower.]
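The constructors above differ mainly in whether they copy or bind the caller's data; a compact C# sketch, assuming MathNet.Numerics 3.x (names and values illustrative).

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixBuildDemo
{
    static void Main()
    {
        // Copying factory: the matrix is independent of the source 2D array.
        var a = DenseMatrix.OfArray(new double[,]
        {
            { 1.0, 2.0 },
            { 3.0, 4.0 }
        });

        // Raw column-major array, bound directly: edits are visible both ways.
        var data = new[] { 1.0, 3.0, 2.0, 4.0 };     // columns (1,3) and (2,4)
        var b = new DenseMatrix(2, 2, data);
        data[0] = 10.0;                              // also changes b[0, 0]

        // Builder helpers for identity and init-function matrices.
        var eye = Matrix<double>.Build.DenseIdentity(2);
        var c = Matrix<double>.Build.Dense(2, 2, (i, j) => 10.0 * i + j);

        Console.WriteLine(a * b + eye - c);
    }
}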
+ [MathNet.Numerics docs, continued: DenseMatrix scalar Modulus/Remainder in both forms, Trace (square matrices only), the full set of +, -, and * operators, and IsSymmetric, followed by the DenseVector type: dense storage, element count, the raw data accessor, and the storage and length constructors.]
+ [MathNet.Numerics docs, continued: DenseVector construction from a raw array (bound without copying) or from copies of vectors, arrays and (indexed) enumerables, the Create/CreateRandom helpers, the conversions between DenseVector and double[], and the scalar/vector Add, Subtract, Negate, Multiply and DotProduct members with their operator forms.]
+ [MathNet.Numerics docs, continued: the DenseVector *, dot-product, / and % operators, scalar Modulus/Remainder, absolute and plain minimum/maximum indices, Sum, the L1, L2, infinity and p-norms, pointwise divide and power, and Parse/TryParse of dense vectors from 'n', 'n,n,..', '(n,n,..)' or '[n,n,...]' strings.]
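A small sketch of the dense-vector statistics and norms documented above, assuming MathNet.Numerics 3.x; the expected values are written as comments.

using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseVectorDemo
{
    static void Main()
    {
        // OfArray copies; the double[] constructor would bind the array directly instead.
        var v = DenseVector.OfArray(new[] { 3.0, -4.0, 0.0 });

        Console.WriteLine(v.Sum());                  // -1
        Console.WriteLine(v.L1Norm());               // 7   (Manhattan norm)
        Console.WriteLine(v.L2Norm());               // 5   (Euclidean norm)
        Console.WriteLine(v.InfinityNorm());         // 4   (largest absolute value)
        Console.WriteLine(v.Norm(3.0));              // general p-norm

        Console.WriteLine(v.AbsoluteMaximumIndex()); // 1
        Console.WriteLine(v.MinimumIndex());         // 1
    }
}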
+ [MathNet.Numerics docs, continued: the remaining dense-vector TryParse overload, then the DiagonalMatrix type - possibly non-square but with the diagonal anchored at (0,0); setting an off-diagonal entry throws unless the value is 0.0 or NaN, which leaves the matrix unchanged - with constructors from storage, dimensions, a diagonal value, a raw diagonal array (bound without copying), or copies of diagonal matrices, two-dimensional arrays and enumerables, plus Create, identity and random factories.]
+ [MathNet.Numerics docs, continued: DiagonalMatrix arithmetic (Negate, Add, Subtract, scalar/vector/matrix Multiply with the transpose variants, scalar division in both forms), Determinant, reading and setting the diagonal (the source length must equal Min(Rows, Columns)), the induced L1, L2 and infinity norms, the Frobenius norm, the condition number, and Inverse (square, non-singular matrices only).]
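A sketch of typical DiagonalMatrix usage, assuming MathNet.Numerics 3.x; only diagonal entries are touched, since writing a non-zero off-diagonal value would throw as described above.

using System;
using MathNet.Numerics.LinearAlgebra;

class DiagonalMatrixDemo
{
    static void Main()
    {
        // Only the diagonal is stored; off-diagonal cells are implicitly zero.
        var d = Matrix<double>.Build.DiagonalOfDiagonalArray(new[] { 2.0, 4.0, 8.0 });

        Console.WriteLine(d.Determinant());   // 64 (product of the diagonal)
        Console.WriteLine(d.Diagonal());      // (2, 4, 8)
        Console.WriteLine(d.Inverse());       // diagonal (0.5, 0.25, 0.125)
        Console.WriteLine(d.L1Norm());        // 8 (maximum absolute column sum)

        // Multiplying by a vector simply scales each component.
        var x = Vector<double>.Build.Dense(new[] { 1.0, 1.0, 1.0 });
        Console.WriteLine(d * x);             // (2, 4, 8)
    }
}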
+ [MathNet.Numerics docs, continued: DiagonalMatrix lower/upper and strictly lower/upper triangle extraction, SubMatrix with its range checks, PermuteColumns/PermuteRows (always throw, since permuting a diagonal matrix is meaningless), IsSymmetric, and scalar Modulus/Remainder, then the Cholesky factorization class: for a symmetric positive definite A it computes a lower triangular L with A = L*L' at construction time and throws otherwise.]
+ [MathNet.Numerics docs, continued: the Cholesky Determinant and log-determinant properties, the dense Cholesky implementation with Solve(B) and Solve(b), and the eigenvalue decomposition (Evd) class: A = V*D*V' with orthogonal V for symmetric matrices, and a block-diagonal D with 2-by-2 blocks for complex eigenvalue pairs otherwise, so that A*V = V*D; it also documents the EISPACK-derived symmetric tridiagonalization (tred2), tridiagonal QL (tql2) and Hessenberg reduction (orthes/ortran) routines.]
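A sketch of solving a symmetric positive definite system through the Cholesky factorization described above, assuming MathNet.Numerics 3.x; the matrix and right-hand side are arbitrary test data.

using System;
using MathNet.Numerics.LinearAlgebra;

class CholeskyDemo
{
    static void Main()
    {
        // Symmetric and positive definite; Cholesky() throws otherwise.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0, 0.0 },
            { 1.0, 3.0, 1.0 },
            { 0.0, 1.0, 2.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

        var chol = a.Cholesky();            // A = L * L'
        Matrix<double> l = chol.Factor;     // the lower triangular factor L
        Vector<double> x = chol.Solve(b);   // solves A x = b

        Console.WriteLine(l);
        Console.WriteLine(x);
        Console.WriteLine((a * x - b).L2Norm());   // residual, ~0
        Console.WriteLine(chol.Determinant);
    }
}

For symmetric positive definite systems Cholesky is the usual choice: it is roughly half the cost of a general LU factorization and fails immediately when the matrix is not positive definite.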
+ [MathNet.Numerics docs, continued: the EISPACK-derived Hessenberg-to-real-Schur routine (hqr2), the complex scalar division helper, and the Evd Solve overloads; the modified Gram-Schmidt QR factorization (A = QR with an orthogonal Q and upper triangular R, rejecting rank-deficient or under-determined matrices) with its Factorize and Solve members; the dense LU factorization (A = L*U) with Solve and an LU-based Inverse; and the Householder QR factorization with its Tau vector and constructor.]
+ [MathNet.Numerics docs, continued: the QR Solve overloads; the dense singular value decomposition (M = UΣVT with unitary U and V and non-negative singular values ordered descending, optionally computing the U/VT vectors) with its Solve overloads; and the abstract factorization properties - the Evd determinant, rank and full-rank flag, the Gram-Schmidt QR absolute determinant and full-rank flag, and the opening of the LU description.]
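A sketch contrasting the QR and SVD solvers documented above on a small least-squares problem, assuming MathNet.Numerics 3.x; the data is invented.

using System;
using MathNet.Numerics.LinearAlgebra;

class QrSvdDemo
{
    static void Main()
    {
        // Overdetermined system: four equations, two unknowns.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 1.0, 1.0 },
            { 1.0, 2.0 },
            { 1.0, 3.0 },
            { 1.0, 4.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 6.0, 5.0, 7.0, 10.0 });

        var qr = a.QR();                  // A = Q R (Householder)
        Vector<double> xQr = qr.Solve(b); // least-squares solution

        var svd = a.Svd(true);            // A = U * S * VT, with the vectors computed
        Vector<double> xSvd = svd.Solve(b);

        Console.WriteLine(xQr);
        Console.WriteLine(xSvd);                 // agrees with the QR solution here
        Console.WriteLine(svd.ConditionNumber);  // max(S) / min(S)
        Console.WriteLine(svd.Rank);             // 2 for this full-rank matrix
    }
}

QR is the cheaper route for well-conditioned least squares; the SVD additionally reports rank and conditioning, which is what the rank, norm and condition-number properties summarized above expose.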
+ [MathNet.Numerics docs, continued: the LU pivoting remark (the pivots encode a permutation P with P*A = L*U) and the LU determinant; the Householder QR description distinguishing full (m-by-m Q, m-by-n R) from thin (m-by-n Q, n-by-n R) factorizations, with the absolute determinant and full-rank flag; the SVD rank, 2-norm, condition number (max(S)/min(S)) and determinant properties; and the user-matrix Cholesky implementation with its per-column factorization step and Solve overloads.]
+ [MathNet.Numerics docs, continued: the user-matrix eigenvalue decomposition, which reuses the EISPACK-derived tred2, tql2, orthes and hqr2 routines and the complex scalar division helper and exposes the same Solve overloads, followed by the user-matrix modified Gram-Schmidt QR class.]
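A sketch of the eigenvalue decomposition documented above, assuming MathNet.Numerics 3.x; the 2-by-2 symmetric matrix keeps the eigenvalues real.

using System;
using MathNet.Numerics.LinearAlgebra;

class EvdDemo
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2.0, 1.0 },
            { 1.0, 2.0 }
        });

        var evd = a.Evd();

        // Eigenvalues are reported as complex numbers; here the imaginary parts are zero.
        foreach (var lambda in evd.EigenValues)
            Console.WriteLine(lambda);          // 1 and 3 (in some order)

        Matrix<double> v = evd.EigenVectors;
        Matrix<double> d = evd.D;

        // Consistency check from the remarks above: A*V should equal V*D.
        Console.WriteLine((a * v - v * d).FrobeniusNorm());   // ~0
    }
}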
+ + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. 
+ The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. 
+ The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
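The solver example referred to above did not survive this stripped rendering, so here is a minimal sketch of the usual call pattern. The `Solve(matrix, input, result, iterator, preconditioner)` shape follows the signature documented below; the concrete type names (`BiCgStab`, `Iterator<double>`, the stop criteria and `DiagonalPreconditioner`) are assumed from the Math.NET Numerics 3.x solver namespaces.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

// A small non-symmetric sparse system A*x = b.
var A = SparseMatrix.OfArray(new double[,]
{
    {  4.0, -1.0,  0.0 },
    { -2.0,  4.0, -1.0 },
    {  0.0, -1.0,  4.0 }
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
var x = Vector<double>.Build.Dense(b.Count);   // result vector, filled in place

// Stop once the residual is small enough, or give up after 1000 iterations.
var iterator = new Iterator<double>(
    new IterationCountStopCriterion<double>(1000),
    new ResidualStopCriterion<double>(1e-10));

var solver = new BiCgStab();
solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());
```

As the remarks above note, the choice of preconditioner matters; the diagonal (Jacobi) preconditioner used here is simply the least intrusive option.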
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes and B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
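Again, the example code referred to above is absent from this rendering. Driving GPBiCG looks the same as the BiCGStab sketch earlier; only the solver type changes (assumed here to be named `GpBiCg`), and the switch-over step counts between the two methods can be tuned through the properties documented below.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

var A = SparseMatrix.OfArray(new double[,]
{
    { 5.0, -2.0,  0.0 },
    { 1.0,  3.0, -1.0 },
    { 0.0, -1.0,  4.0 }
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 4.0, 2.0 });
var x = Vector<double>.Build.Dense(b.Count);

var iterator = new Iterator<double>(
    new IterationCountStopCriterion<double>(1000),
    new ResidualStopCriterion<double>(1e-10));

// Same Solve(matrix, input, result, iterator, preconditioner) shape as BiCGStab.
new GpBiCg().Solve(A, b, x, iterator, new DiagonalPreconditioner());
```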
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
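A preconditioner is normally passed straight into a solver's `Solve` call, but the `Initialize`/`Approximate` pair documented below can also be driven directly, which makes the role of ILU(0) easier to see. The class name `ILU0Preconditioner` and the exact method shapes are assumptions based on the Math.NET Numerics 3.x solver namespace; treat this as a sketch.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

var A = SparseMatrix.OfArray(new double[,]
{
    {  4.0, -1.0,  0.0 },
    { -1.0,  4.0, -1.0 },
    {  0.0, -1.0,  4.0 }
});
var rhs = Vector<double>.Build.Dense(new[] { 1.0, 0.0, 1.0 });
var lhs = Vector<double>.Build.Dense(rhs.Count);

var ilu0 = new ILU0Preconditioner();  // assumed class name for the ILU(0) preconditioner
ilu0.Initialize(A);                   // build the combined L/U storage from A
ilu0.Approximate(rhs, lhs);           // apply the approximate inverse to the right hand side
```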
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
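The drop-tolerance preconditioner is configured through the `FillLevel`, `DropTolerance` and `PivotTolerance` properties documented below. The sketch assumes the class is named `ILUTPPreconditioner` and that the properties can be set before the preconditioner is initialized, as the remarks below imply.

```csharp
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

// Assumed class name; the three knobs are the properties documented below.
var ilutp = new ILUTPPreconditioner
{
    FillLevel = 10.0,       // allowed fill, as a multiple of the original non-zero count
    DropTolerance = 1e-4,   // drop entries whose absolute value falls below this
    PivotTolerance = 0.5    // pivot when |row(i,j)| > |row(i,i)| / PivotTolerance
};
// Changing any of these after Initialize() invalidates the preconditioner,
// so it must be re-initialized before being used again.
```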
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
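The missing example for ML(k)-BiCGStab follows the same pattern as the other Krylov solvers; the class name `MlkBiCgStab` is assumed, and the number of Lanczos starting vectors can be adjusted through the property documented below before solving.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

var A = SparseMatrix.OfArray(new double[,]
{
    {  3.0, -1.0,  0.0 },
    { -1.0,  3.0, -1.0 },
    {  0.0, -1.0,  3.0 }
});
var b = Vector<double>.Build.Dense(new[] { 2.0, 1.0, 2.0 });
var x = Vector<double>.Build.Dense(b.Count);

var iterator = new Iterator<double>(
    new IterationCountStopCriterion<double>(1000),
    new ResidualStopCriterion<double>(1e-10));

new MlkBiCgStab().Solve(A, b, x, iterator, new DiagonalPreconditioner());
```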
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
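As with the other solvers, the referenced example does not survive here; a minimal TFQMR sketch follows, with the class name `TFQMR` assumed from the same solver namespace.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

var A = SparseMatrix.OfArray(new double[,]
{
    { 6.0, -2.0, 1.0 },
    { 0.0,  5.0, -1.0 },
    { 1.0,  0.0,  4.0 }
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 1.0, 1.0 });
var x = Vector<double>.Build.Dense(b.Count);

var iterator = new Iterator<double>(
    new IterationCountStopCriterion<double>(1000),
    new ResidualStopCriterion<double>(1e-10));

new TFQMR().Solve(A, b, x, iterator, new DiagonalPreconditioner());
```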
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
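A small sketch of the single-precision vector arithmetic documented above (dot product, pointwise operations, absolute extrema); the builder syntax is assumed to be the standard Math.NET Numerics 3.x API.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

var a = Vector<float>.Build.Dense(new[] { 1f, 2f, 3f });
var b = Vector<float>.Build.Dense(new[] { 4f, 5f, 6f });

Console.WriteLine(a.DotProduct(b));          // 32 = 1*4 + 2*5 + 3*6
Console.WriteLine(a.PointwiseMultiply(b));   // pointwise product: (4, 10, 18)
Console.WriteLine(a.PointwiseDivide(b));     // pointwise quotient: (0.25, 0.4, 0.5)
Console.WriteLine(a.AbsoluteMinimumIndex()); // 0, the index of the smallest |a[i]|
```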
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
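A brief sketch of the dense matrix construction routes described above (zero-initialized, from a 2D array, from column arrays, identity). MathNet.Numerics 3.x is assumed; the concrete sizes and values are placeholders.

using MathNet.Numerics.LinearAlgebra;

class MatrixBuildSketch
{
    static void Main()
    {
        var zeros = Matrix<double>.Build.Dense(3, 3);            // all cells zero
        var fromArray = Matrix<double>.Build.DenseOfArray(
            new double[,] { { 1, 2 }, { 3, 4 } });               // independent copy of the 2D array
        var fromColumns = Matrix<double>.Build.DenseOfColumnArrays(
            new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 });             // each array becomes a column
        var identity = Matrix<double>.Build.DenseIdentity(4);    // ones on the diagonal

        System.Console.WriteLine(fromArray.ToString());
        System.Console.WriteLine($"{zeros.RowCount} {fromColumns[0, 1]} {identity.Trace()}");
    }
}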
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. 
+ The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. 
+ + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
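A small sketch contrasting the array-binding constructor and the copying factory of DenseVector described above, plus the dot product. MathNet.Numerics 3.x is assumed; values are arbitrary.

using MathNet.Numerics.LinearAlgebra.Double;

class DenseVectorSketch
{
    static void Main()
    {
        var raw = new[] { 1.0, 2.0, 3.0 };

        var bound = new DenseVector(raw);       // binds directly: changes to raw are visible here
        var copied = DenseVector.OfArray(raw);  // independent copy in a new memory block

        raw[0] = 99.0;
        System.Console.WriteLine($"{bound[0]} {copied[0]}"); // 99 vs 1

        double dot = bound.DotProduct(copied);
        System.Console.WriteLine(dot);
    }
}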
+ + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. 
The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. 
+ + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the Frobenius norm of this matrix. + The Frobenius norm of this matrix. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. 
+ + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. 
+ If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. 
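To illustrate how factorization classes like the Cholesky and QR decompositions documented above are normally consumed, a hedged sketch follows. MathNet.Numerics 3.x is assumed; the matrix below is chosen only so that it is symmetric positive definite.

using MathNet.Numerics.LinearAlgebra;

class FactorizationSolveSketch
{
    static void Main()
    {
        // Symmetric positive definite, so the Cholesky factorization applies.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4, 1, 0 },
            { 1, 3, 1 },
            { 0, 1, 2 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        var xChol = a.Cholesky().Solve(b); // A = L*L'
        var xQr = a.QR().Solve(b);         // A = Q*R (Householder by default)

        System.Console.WriteLine((xChol - xQr).L2Norm()); // should be ~0
    }
}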
+ + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. 
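Similarly, a sketch of the LU factorization members listed above (determinant, solve, inverse). MathNet.Numerics 3.x is assumed and the data is illustrative.

using MathNet.Numerics.LinearAlgebra;

class LuSketch
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2, 1 },
            { 1, 3 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 3.0, 5.0 });

        var lu = a.LU();          // P*A = L*U with partial pivoting
        var x = lu.Solve(b);      // solves A*x = b using the cached factorization
        var det = lu.Determinant; // determinant from the factorization
        var inv = lu.Inverse();   // inverse via LU decomposition

        System.Console.WriteLine($"{x[0]} {det} {inv[0, 0]}");
    }
}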
+ + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. 
The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. 
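A sketch of the SVD queries mentioned above (rank, condition number, 2-norm, least-squares solve). Again MathNet.Numerics 3.x is assumed and the matrix is sample data only.

using MathNet.Numerics.LinearAlgebra;

class SvdSketch
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 1, 0 },
            { 0, 2 },
            { 0, 0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 4.0, 0.0 });

        var svd = a.Svd();                              // computes U, S, VT
        System.Console.WriteLine(svd.Rank);             // number of non-negligible singular values
        System.Console.WriteLine(svd.ConditionNumber);  // max(S) / min(S)
        System.Console.WriteLine(svd.L2Norm);           // largest singular value
        System.Console.WriteLine(svd.Solve(b));         // least-squares solution of A*x = b
    }
}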
+ + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex value z1 + Complex value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. 
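The complex-valued matrix type described here adds conjugation-aware operations; a minimal sketch, assuming System.Numerics.Complex and MathNet.Numerics 3.x, with sample values only.

using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

class ComplexMatrixSketch
{
    static void Main()
    {
        // A 2x2 Hermitian matrix: equal to its own conjugate transpose.
        var a = Matrix<Complex>.Build.DenseOfArray(new[,]
        {
            { new Complex(2, 0), new Complex(1, -1) },
            { new Complex(1, 1), new Complex(3, 0) }
        });

        var ah = a.ConjugateTranspose();
        System.Console.WriteLine(a.IsHermitian());          // true for this sample
        System.Console.WriteLine((a - ah).FrobeniusNorm()); // ~0
    }
}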
+ + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
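And a short sketch of the pointwise and norm members listed above, shown on the real-valued matrix type for brevity. MathNet.Numerics 3.x is assumed; the data is illustrative.

using MathNet.Numerics.LinearAlgebra;

class PointwiseSketch
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
        var b = Matrix<double>.Build.DenseOfArray(new double[,] { { 5, 6 }, { 7, 8 } });

        var had = a.PointwiseMultiply(b);    // element-wise product
        var ratio = a.PointwiseDivide(b);    // element-wise quotient
        var squared = a.PointwisePower(2.0); // element-wise power

        System.Console.WriteLine($"{a.L1Norm()} {a.InfinityNorm()} {a.FrobeniusNorm()}");
        System.Console.WriteLine($"{had[0, 0]} {ratio[1, 1]} {squared[1, 0]}");
    }
}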
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
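The `<code>` sample promised above did not survive into this diff, so here is a minimal sketch of what such a BiCGStab solve usually looks like. It assumes MathNet.Numerics 3.x and uses the double-precision types (`BiCgStab`, `Iterator<double>`, `ResidualStopCriterion<double>`, `DiagonalPreconditioner`) for brevity rather than the Complex variants this file documents; the five-argument `Solve(matrix, input, result, iterator, preconditioner)` call is the signature described above.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class BiCgStabSketch
{
    static void Main()
    {
        // A small, diagonally dominant, non-symmetric system Ax = b.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0, 0.0 },
            { 2.0, 5.0, 1.0 },
            { 0.0, 1.0, 3.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        // Stop after 1000 iterations or once the residual is small enough.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new BiCgStab();
        var x = Vector<double>.Build.Dense(b.Count);   // pre-allocated result vector

        // Solve(matrix, input, result, iterator, preconditioner), as documented above.
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        System.Console.WriteLine(x);
    }
}
```

The same pattern (build an iterator from stop criteria, pre-allocate the result vector, pass a preconditioner) applies to every solver documented below.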
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
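As a rough illustration of the composite idea (try a sequence of solvers until one of them converges), here is a hand-rolled sketch in the double-precision types. It deliberately does not use the library's own composite solver class, whose construction is not spelled out in this file; the convergence check simply recomputes the true residual b - Ax used throughout these docs.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class CompositeIdeaSketch
{
    // Try each solver in turn and return the first result whose
    // true residual ||b - A*x|| falls below the tolerance.
    public static Vector<double> SolveWithFallback(
        Matrix<double> a, Vector<double> b,
        IIterativeSolver<double>[] solvers, double tolerance = 1e-10)
    {
        foreach (var solver in solvers)
        {
            // A fresh iterator per sub-solver; the library's composite solver
            // instead shares one iterator across sub-solvers, per the note above.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(tolerance));

            var x = Vector<double>.Build.Dense(b.Count);
            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

            if ((b - a * x).L2Norm() < tolerance)
            {
                return x;   // this sub-solver converged
            }
        }

        throw new System.InvalidOperationException("No solver in the sequence converged.");
    }
}
```

A call might look like `CompositeIdeaSketch.SolveWithFallback(a, b, new IIterativeSolver<double>[] { new TFQMR(), new BiCgStab() });`.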
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
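Usage of the GPBiCG solver mirrors the BiCGStab sketch earlier; the class name `GpBiCg` and the `Status` property on the iterator are assumptions based on the library's usual naming, not taken from this file.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class GpBiCgSketch
{
    // Assumes a square, non-symmetric 'a' and a matching right-hand side 'b',
    // built as in the BiCgStab sketch earlier.
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new GpBiCg();                   // assumed class name for the GPBiCG solver
        var x = Vector<double>.Build.Dense(b.Count);

        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        // Status (assumed property) reports how the stop criteria ended the run.
        System.Console.WriteLine(iterator.Status);
        return x;
    }
}
```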
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
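An ILU(0) factorization is consumed as a preconditioner rather than called directly: hand an instance to any solver's `Solve(matrix, input, result, iterator, preconditioner)` call and it is initialized from the coefficient matrix and applied through `Approximate`, as described below. A sketch, assuming the double-precision class is named `ILU0Preconditioner`:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class Ilu0Sketch
{
    // Solve Ax = b with BiCGStab, preconditioned by ILU(0).
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var preconditioner = new ILU0Preconditioner();   // assumed class name

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-10));

        var x = Vector<double>.Build.Dense(b.Count);

        // The solver is handed the preconditioner and drives its
        // Initialize(matrix) / Approximate(rhs, lhs) steps described here.
        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        return x;
    }
}
```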
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
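The drop-tolerance variant exposes the FillLevel, DropTolerance and PivotTolerance settings documented below; changing them after the preconditioner has been created invalidates it. A hedged sketch, assuming the double-precision class is named `ILUTPPreconditioner`:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class IlutpSketch
{
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        // FillLevel / DropTolerance / PivotTolerance match the settings
        // documented below; set them before the first solve, because
        // changing them afterwards requires re-initialization.
        var preconditioner = new ILUTPPreconditioner    // assumed class name
        {
            FillLevel = 10.0,       // allowed fill relative to the original non-zero count
            DropTolerance = 1e-4,   // drop entries with absolute value below this
            PivotTolerance = 0.0    // 0.0 disables partial pivoting
        };

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-10));

        var x = Vector<double>.Build.Dense(b.Count);
        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        return x;
    }
}
```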
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
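Again the referenced example code did not survive extraction; the sketch below shows the ML(k)-BiCGStab solver with an explicit number of Lanczos starting vectors. The class name `MlkBiCgStab` and the `NumberOfStartingVectors` property name are assumptions inferred from the descriptions below, not taken from this file.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class MlkBiCgStabSketch
{
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var solver = new MlkBiCgStab                    // assumed class name
        {
            // Must be larger than 1 and smaller than the number of unknowns
            // (see below); the property name itself is an assumption.
            NumberOfStartingVectors = 5
        };

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var x = Vector<double>.Build.Dense(b.Count);
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}
```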
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
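For TFQMR the stripped example would follow the same shape; the sketch below also recomputes the true residual b - Ax defined below to sanity-check the returned solution (double-precision types assumed).

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class TfqmrSketch
{
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var x = Vector<double>.Build.Dense(b.Count);
        new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());

        // True residual as defined in these docs: residual = b - A*x.
        var residual = b - a * x;
        System.Console.WriteLine($"||b - Ax|| = {residual.L2Norm():E2}");
        return x;
    }
}
```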
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
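To make the sparse-matrix factories above concrete, here is a short double-precision sketch; `SparseMatrix.OfIndexed`, `SparseMatrix.Create` and `NonZerosCount` are the assumed names of the indexed-enumerable factory, the init-function factory and the non-zero count documented above, and the Complex variant this file describes may differ in overloads.

```csharp
using System;
using System.Collections.Generic;
using MathNet.Numerics.LinearAlgebra.Double;

static class SparseMatrixSketch
{
    public static void Run()
    {
        // Only the listed (row, column, value) entries are stored;
        // every other cell is an implicit zero.
        var entries = new List<Tuple<int, int, double>>
        {
            Tuple.Create(0, 0, 4.0),
            Tuple.Create(1, 1, 5.0),
            Tuple.Create(2, 2, 3.0),
            Tuple.Create(0, 2, 1.0)
        };

        var a = SparseMatrix.OfIndexed(3, 3, entries);   // assumed factory name
        Console.WriteLine(a.NonZerosCount);              // 4

        // Init-function factory: a tridiagonal pattern; zero results are not stored.
        var t = SparseMatrix.Create(5, 5,
            (i, j) => i == j ? 2.0 : Math.Abs(i - j) == 1 ? -1.0 : 0.0);
        Console.WriteLine(t.NonZerosCount);              // 13
    }
}
```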
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
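The sparse vector type follows the same pattern: only the listed (index, value) pairs are stored. A sketch in the double-precision types; `SparseVector.OfIndexedEnumerable` is the assumed name of the indexed-enumerable factory, and the scalar-add warning in the docs above is reflected in the last lines.

```csharp
using System;
using System.Collections.Generic;
using MathNet.Numerics.LinearAlgebra.Double;

static class SparseVectorSketch
{
    public static void Run()
    {
        // A length-10000 vector with only three stored entries.
        var pairs = new List<Tuple<int, double>>
        {
            Tuple.Create(3, 1.5),
            Tuple.Create(42, -2.0),
            Tuple.Create(9000, 4.0)
        };

        var v = SparseVector.OfIndexedEnumerable(10000, pairs);   // assumed factory name

        Console.WriteLine(v.NonZerosCount);   // 3
        Console.WriteLine(v.L1Norm());        // 7.5 = |1.5| + |-2.0| + |4.0|
        Console.WriteLine(v.L2Norm());        // sqrt(1.5^2 + 2.0^2 + 4.0^2)

        // Per the warning above: adding a non-zero scalar makes every cell
        // non-zero, so the result is a fully populated (and inefficient)
        // sparse vector; prefer a dense vector for that kind of operation.
        var w = v.Add(0.1);
        Console.WriteLine(w.Count);           // still 10000 elements, now all non-zero
    }
}
```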
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. 
+ + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. 
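A short sketch of the dense, column-major matrix type and the three norms listed above (induced L1, induced infinity, Frobenius), using the double-precision `DenseMatrix`; the identity factory is taken from the `Matrix<double>.Build` builder to stay close to the documented factories.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

static class DenseMatrixSketch
{
    public static void Run()
    {
        // Copied from a 2-D array; internally stored column by column, as noted above.
        var a = DenseMatrix.OfArray(new double[,]
        {
            { 1.0, -2.0 },
            { 3.0,  4.0 }
        });

        Console.WriteLine(a.L1Norm());        // 6  = max absolute column sum (|-2| + |4|)
        Console.WriteLine(a.InfinityNorm());  // 7  = max absolute row sum    (|3| + |4|)
        Console.WriteLine(a.FrobeniusNorm()); // sqrt(1 + 4 + 9 + 16) ≈ 5.477

        // Init-function factory and the builder's identity matrix.
        var grid = DenseMatrix.Create(3, 3, (i, j) => i * 10 + j);
        var id = Matrix<double>.Build.DenseIdentity(3);
        Console.WriteLine(id * grid);         // multiplying by the identity leaves the values unchanged
    }
}
```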
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. 
+ + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex32 value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex32 value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex32 value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. 
+ + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex32 dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex32 dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. 
+ Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
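+
+ A brief usage sketch may help here; it is an illustration only, assuming the
+ MathNet.Numerics 3.x factorization API (Matrix<T>.QR() and Matrix<T>.GramSchmidt(),
+ each returning a factorization with Q, R and Solve members). It is shown with
+ double for brevity; the same calls are expected to apply to the Complex32 types.
+
+ using MathNet.Numerics.LinearAlgebra;
+
+ // Small square system A*x = b solved through a cached QR factorization.
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 4.0, 1.0, 0.0 },
+     { 1.0, 3.0, 1.0 },
+     { 0.0, 1.0, 2.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
+
+ var qr = A.QR();                     // Householder QR; A.GramSchmidt() is the MGS-based variant
+ Vector<double> x = qr.Solve(b);      // solves A*x = b from the stored Q and R factors
+ Matrix<double> check = qr.Q * qr.R;  // reproduces A up to rounding error
+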
+ + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. 
+ + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
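+
+ A hedged sketch of how the factorization described above is typically consumed,
+ assuming the MathNet.Numerics 3.x API (Matrix<T>.Svd(computeVectors) returning an
+ object that exposes S, ConditionNumber, Rank and Solve):
+
+ using MathNet.Numerics.LinearAlgebra;
+
+ // Overdetermined 3x2 system: Svd.Solve returns the least-squares solution.
+ var A = Matrix<double>.Build.DenseOfArray(new double[,]
+ {
+     { 2.0, 0.0 },
+     { 0.0, 1.0 },
+     { 1.0, 1.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 4.0, 1.0, 3.0 });
+
+ var svd = A.Svd(true);                         // true: also compute the U and VT vectors
+ Vector<double> sigma = svd.S;                  // singular values, in descending order
+ double conditionNumber = svd.ConditionNumber;  // max(S) / min(S)
+ Vector<double> x = svd.Solve(b);               // needs the vectors computed above
+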
+ + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. 
+ If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the
+ proper preconditioner.
+
+
+ The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks
+ for iterative methods
+
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel,
+ June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo,
+ Charles Romine and Henk van der Vorst
+
+ Url: http://www.netlib.org/templates/Templates.html
+
+ Algorithm is described in Chapter 2, section 2.3.8, page 27
+
+
+ The example code below provides an indication of the possible use of the
+ solver.
+
+
+
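+
+ The sketch below is an illustration rather than the library's own sample; it assumes
+ the MathNet.Numerics 3.x solver types (BiCgStab, Iterator<double>, the stop criteria
+ in MathNet.Numerics.LinearAlgebra.Solvers, and the DiagonalPreconditioner documented
+ later in this file):
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ // A small sparse, non-symmetric system A*x = b.
+ var A = SparseMatrix.OfArray(new double[,]
+ {
+     { 4.0, 1.0, 0.0 },
+     { 0.0, 3.0, 2.0 },
+     { 1.0, 0.0, 5.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
+ var x = Vector<double>.Build.Dense(b.Count);   // receives the solution
+
+ // Stop after 1000 iterations or once the residual drops below 1e-10, whichever comes first.
+ var iterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ var solver = new BiCgStab();
+ solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());
+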
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+
+ Note that if an iterator is passed to this solver it will be used for all the sub-solvers.
+
+
+
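+
+ A minimal sketch of the shared-iterator behaviour noted above; the Iterator<T> and
+ stop-criterion types are assumed from MathNet.Numerics 3.x, and the Status property
+ used at the end is likewise an assumption:
+
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ // One iterator, built once, applies the same stopping rules to every sub-solver it is passed to.
+ var sharedIterator = new Iterator<double>(
+     new IterationCountStopCriterion<double>(2000),
+     new ResidualStopCriterion<double>(1e-8));
+
+ // ... hand sharedIterator to the composite solver's Solve call ...
+
+ // Afterwards the iterator is assumed to report why iteration stopped
+ // (converged, diverged, iteration cap reached, ...).
+ var status = sharedIterator.Status;
+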
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the
+ proper preconditioner.
+
+
+ The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with
+ efficiency and robustness
+
+ S. Fujino
+
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117
+
+
+ The example code below provides an indication of the possible use of the
+ solver.
+
+
+
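+
+ Again a hedged sketch rather than the library's own sample, assuming a
+ MathNet.Numerics 3.x GpBiCg class with the Solve(matrix, input, result, iterator,
+ preconditioner) signature documented below:
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,] { { 3.0, 1.0 }, { 0.0, 2.0 } });
+ var b = Vector<double>.Build.Dense(new[] { 5.0, 4.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ // Default settings control how many BiCGStab steps are taken before switching to GPBiCG steps.
+ var solver = new GpBiCg();
+ solver.Solve(A, b, x,
+     new Iterator<double>(
+         new IterationCountStopCriterion<double>(1000),
+         new ResidualStopCriterion<double>(1e-10)),
+     new DiagonalPreconditioner());
+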
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
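+
+ A hedged usage sketch for the preconditioner described above; the class name
+ ILU0Preconditioner is an assumption, while the pattern of passing a preconditioner
+ into a solver's Solve call follows the signatures documented in this file:
+
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,]
+ {
+     {  4.0, -1.0,  0.0 },
+     { -1.0,  4.0, -1.0 },
+     {  0.0, -1.0,  4.0 }
+ });
+ var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 1.0 });
+ var x = Vector<double>.Build.Dense(b.Count);
+
+ // The solver is expected to call Initialize(A) on the preconditioner and then
+ // Approximate(rhs, lhs) inside each iteration. ILU0Preconditioner is an assumed name.
+ var ilu0 = new ILU0Preconditioner();
+ new BiCgStab().Solve(A, b, x,
+     new Iterator<double>(
+         new ResidualStopCriterion<double>(1e-10),
+         new IterationCountStopCriterion<double>(500)),
+     ilu0);
+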
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner
+
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22
+
+
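+
+ The tuning knobs documented below might be set as in this hedged sketch; the class
+ name ILUTPPreconditioner is an assumption, and the property names are taken from the
+ remarks that follow:
+
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var ilutp = new ILUTPPreconditioner    // assumed class name
+ {
+     FillLevel = 10.0,       // allowed fill-in as a fraction of the original non-zero count
+     DropTolerance = 1e-4,   // entries with an absolute value below this are dropped
+     PivotTolerance = 0.1    // 0.0 would disable pivoting entirely
+ };
+ // Per the remarks below, changing these after the preconditioner has been created
+ // invalidates it and requires re-initialization.
+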
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
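The remarks above mention example code that is not reproduced in this XML file, so the following hedged C# sketch stands in for it: it builds a small sparse system and calls the five-argument Solve overload documented below. The Iterator, stop-criterion and Solve shapes follow the entries in this section; MlkBiCgStab and UnitPreconditioner are assumed MathNet.Numerics 3.x class names, and the Double types are used here for readability.

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;   // assumed home of MlkBiCgStab

class MlkBiCgStabSketch
{
    static void Main()
    {
        const int n = 200;

        // Tridiagonal, diagonally dominant test system A x = b.
        var a = Matrix<double>.Build.Sparse(n, n,
            (i, j) => i == j ? 4.0 : (Math.Abs(i - j) == 1 ? -1.0 : 0.0));
        var b = Vector<double>.Build.Dense(n, 1.0);
        var x = Vector<double>.Build.Dense(n);     // result vector, filled in place

        // Stop after 1000 iterations or once the residual falls below 1e-10.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new MlkBiCgStab();
        // Property name assumed; the entry above only says the value must be
        // larger than 1 and smaller than the number of variables.
        solver.NumberOfStartingVectors = 4;

        solver.Solve(a, b, x, iterator, new UnitPreconditioner<double>());
    }
}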
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
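The TFQMR remarks above also refer to example code that the XML does not carry. A short hedged sketch follows, pairing the solver with the milu(0) preconditioner described earlier in this section; TFQMR, MILU0Preconditioner and its UseModified property are assumed MathNet.Numerics 3.x names.

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;   // assumed home of TFQMR and MILU0Preconditioner

static class TfqmrSketch
{
    // 'a' must be a square sparse (CSR-backed) matrix, per the milu(0) Initialize remarks above.
    public static void SolveInPlace(Matrix<double> a, Vector<double> b, Vector<double> x)
    {
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        // Modified ILU(0), as the MILU0 entry above recommends.
        var milu0 = new MILU0Preconditioner { UseModified = true };

        new TFQMR().Solve(a, b, x, iterator, milu0);
    }
}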
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
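Because the storage behind this class is 3-array CSR, building it from an indexed list of (row, column, value) triples is far cheaper than assigning cells one by one. A hedged C# sketch with the Double variant follows (this stretch of the documentation appears to belong to one of the complex-valued types, but the members are shared); SparseMatrix.OfIndexed and NonZerosCount are assumed MathNet.Numerics 3.x names.

using System;
using System.Collections.Generic;
using MathNet.Numerics.LinearAlgebra.Double;   // assumed namespace of the double SparseMatrix

class SparseCsrSketch
{
    static void Main()
    {
        // Only the non-zero entries are listed, as (row, column, value) triples.
        var entries = new List<Tuple<int, int, double>>
        {
            Tuple.Create(0, 0, 4.0),
            Tuple.Create(0, 1, -1.0),
            Tuple.Create(1, 0, -1.0),
            Tuple.Create(1, 1, 4.0)
        };

        var a = SparseMatrix.OfIndexed(1000, 1000, entries);   // factory name assumed

        // Only the four explicit entries are stored, not the full million cells.
        Console.WriteLine(a.NonZerosCount);   // 4
    }
}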
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
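To ground the sum, norm and dot-product entries above, a small hedged C# sketch with the Double sparse vector (again, this part of the file appears to document the Complex32 types, but the member names carry over); SparseOfIndexed on the builder is an assumed MathNet.Numerics 3.x name, while Sum, L1Norm and DotProduct correspond to the entries above.

using System;
using MathNet.Numerics.LinearAlgebra;

class SparseVectorSketch
{
    static void Main()
    {
        // Only the listed indices are stored; everything else is an implicit zero.
        var v = Vector<double>.Build.SparseOfIndexed(100000, new[]
        {
            Tuple.Create(3, 2.0),
            Tuple.Create(99999, -5.0)
        });
        var w = Vector<double>.Build.SparseOfIndexed(100000, new[]
        {
            Tuple.Create(3, 10.0)
        });

        Console.WriteLine(v.Sum());          // 2 + (-5) = -3
        Console.WriteLine(v.L1Norm());       // |2| + |-5| = 7
        Console.WriteLine(v.DotProduct(w));  // 2 * 10 = 20
    }
}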
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. 
+ + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. 
+ A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. 
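Everything in this builder is reached through the static Build property, so generic code can create matrices without naming a concrete storage class. A hedged C# sketch of the factory calls that correspond to the entries above (method names follow MathNet.Numerics 3.x):

using MathNet.Numerics.LinearAlgebra;

class MatrixBuilderSketch
{
    static void Main()
    {
        var build = Matrix<double>.Build;

        var zeros    = build.Dense(3, 4);                           // dense, all cells zero
        var fromInit = build.Dense(3, 3, (i, j) => i + 10.0 * j);   // dense, init function
        var identity = build.DenseIdentity(3);                      // dense identity, ones on the diagonal
        var sparse   = build.Sparse(1000, 1000);                    // sparse, all cells zero
        var copied   = build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });   // independent copy
    }
}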
+ + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. 
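The vector builder mirrors the matrix builder; the "provided random distribution" entry above corresponds to passing an explicit distribution object. A hedged C# sketch (Vector<T>.Build, Random and the Normal class from MathNet.Numerics.Distributions are the assumed 3.x names):

using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

class VectorBuilderSketch
{
    static void Main()
    {
        var build = Vector<double>.Build;

        var zeros   = build.Dense(5);                         // all cells zero
        var filled  = build.Dense(5, 1.5);                    // every cell set to 1.5
        var stepped = build.Dense(5, i => 2.0 * i);           // init function: 0, 2, 4, 6, 8
        var gauss   = build.Random(5, new Normal(0.0, 1.0));  // sampled from a standard normal
    }
}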
+ + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. 
+ + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. 
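The "2D array of existing matrices" entry above is a block-assembly helper: each array cell contributes one block of the result, and, as the entry notes, a block smaller than its cell is placed in the cell's top-left corner with the remainder left zero. A hedged C# sketch (DenseOfMatrixArray is the assumed MathNet.Numerics 3.x name):

using MathNet.Numerics.LinearAlgebra;

class BlockMatrixSketch
{
    static void Main()
    {
        var build = Matrix<double>.Build;

        var a = build.DenseIdentity(2);    // 2x2 block
        var b = build.Dense(2, 3, 7.0);    // 2x3 block
        var c = build.Dense(1, 2, -1.0);   // 1x2 block
        var d = build.Dense(1, 3);         // 1x3 zero block

        // Assembled block-wise into a 3x5 matrix:
        // [ a b ]
        // [ c d ]
        var big = build.DenseOfMatrixArray(new[,] { { a, b }, { c, d } });
    }
}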
+ + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. 
+ This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
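Editor's note: a hedged sketch of the corresponding sparse and diagonal builders described above. Method names reflect the Math.NET Numerics 3.x builder API as I understand it and should be verified against the referenced package; the sizes and values are arbitrary:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    // All-zero sparse matrix; only non-zero entries allocate storage.
    var sparse = Matrix<double>.Build.Sparse(1000, 1000);
    sparse[0, 0] = 1.0;
    sparse[999, 999] = 2.0;

    // Sparse matrix from (row, column, value) triples; omitted keys stay zero.
    var indexed = Matrix<double>.Build.SparseOfIndexed(3, 3, new[]
    {
        Tuple.Create(0, 0, 1.0),
        Tuple.Create(1, 2, 5.0),
        Tuple.Create(2, 1, -2.0)
    });

    // Square diagonal matrix bound directly to the raw array:
    // changes to 'd' and to 'diag' affect each other, as the doc text warns.
    var d = new[] { 1.0, 2.0, 3.0 };
    var diag = Matrix<double>.Build.Diagonal(d);

    // Diagonal identity matrix with ones on the diagonal.
    var identity = Matrix<double>.Build.DiagonalIdentity(4);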
+ + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. 
+ Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + Supported data types are double, single, , and . + + + + Gets the lower triangular form of the Cholesky matrix. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + Supported data types are double, single, , and . 
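Editor's note: before the EVD members below, a minimal sketch of the Cholesky usage documented a few entries above, assuming the standard Math.NET Numerics factorization API. The sample matrix is deliberately symmetric positive definite, since the documentation states the constructor throws otherwise:

    using MathNet.Numerics.LinearAlgebra;

    // A small symmetric, positive definite matrix.
    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 4.0, 1.0 },
        { 1.0, 3.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

    var cholesky = A.Cholesky();
    Matrix<double> L = cholesky.Factor;     // lower triangular L with A = L * L'
    double det = cholesky.Determinant;      // determinant of A
    Vector<double> x = cholesky.Solve(b);   // solves A * x = b using the factors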
+ + + + Gets or sets a value indicating whether matrix is symmetric or not + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Gets or sets the eigen values (λ) of matrix in ascending value. + + + + + Gets or sets eigenvectors. + + + + + Gets or sets the block diagonal eigenvalue matrix. + + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + Supported data types are double, single, , and . + + + + Classes that solves a system of linear equations, AX = B. + + Supported data types are double, single, , and . + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, Ax = b + + The right hand side vector, b. + The left hand side Vector, x. + + + + Solves a system of linear equations, Ax = b. + + The right hand side vector, b. + The left hand side Matrix>, x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + Supported data types are double, single, , and . + + + + Gets the lower triangular factor. + + + + + Gets the upper triangular factor. + + + + + Gets the permutation applied to LU factorization. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. 
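Editor's note: analogous hedged sketches for the EVD and LU factorizations documented above (member names per the Math.NET Numerics API; the matrices are arbitrary examples):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 2.0, 1.0 },
        { 1.0, 2.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 0.0 });

    // Eigenvalue decomposition: for symmetric A, A = V * D * V' with V orthogonal.
    var evd = A.Evd();
    var eigenValues = evd.EigenValues;   // complex vector, ascending as documented
    var V = evd.EigenVectors;
    var D = evd.D;                       // block-diagonal eigenvalue matrix

    // LU factorization with pivoting: P * A = L * U.
    var lu = A.LU();
    var L = lu.L;
    var U = lu.U;
    double det = lu.Determinant;
    var x = lu.Solve(b);                 // solves A * x = b
    var inverse = lu.Inverse();          // inverse of A computed from the LU factors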
+ + + + The type of QR factorization go perform. + + + + + Compute the full QR factorization of a matrix. + + + + + Compute the thin QR factorization of a matrix. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + Supported data types are double, single, , and . + + + + Gets or sets orthogonal Q matrix + + + + + Gets the upper triangular factor R. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + Supported data types are double, single, , and . + + + Indicating whether U and VT matrices have been computed during SVD factorization. + + + + Gets the singular values (Σ) of matrix in ascending value. + + + + + Gets the left singular vectors (U - m-by-m unitary matrix) + + + + + Gets the transpose right singular vectors (transpose of V, an n-by-n unitary matrix) + + + + + Returns the singular values as a diagonal . + + The singular values as a diagonal . + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. 
+ The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + Supported data types are double, single, , and . + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + + + The value of 1.0. + + + + + The value of 0.0. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar denominator to use. + The matrix to store the result of the division. 
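Editor's note: stepping back to the QR and singular value decompositions documented just above (before the Matrix base-class members begin), a hedged sketch of how they are typically used. QRMethod and the factorization properties are assumed from the Math.NET Numerics 3.x API:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Factorization;

    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 1.0, 2.0 },
        { 3.0, 4.0 },
        { 5.0, 6.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

    // Thin QR of a 3x2 matrix: Q is 3x2, R is 2x2; Solve gives the least-squares solution.
    var qr = A.QR(QRMethod.Thin);
    var Q = qr.Q;
    var R = qr.R;
    var xLeastSquares = qr.Solve(b);

    // SVD: A = U * S * V'.
    var svd = A.Svd();
    var singularValues = svd.S;          // vector of singular values
    var U = svd.U;
    var VT = svd.VT;
    int rank = svd.Rank;                 // effective numerical rank
    double cond = svd.ConditionNumber;   // max(S) / min(S)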
+ + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar numerator to use. + The matrix to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent matrix and store the result into the result matrix. + + The exponent matrix to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Adds a scalar to each element of the matrix. + + The scalar to add. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds a scalar to each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix. + + The scalar to subtract. 
+ A new matrix containing the subtraction of this matrix and the scalar. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts each element of the matrix from a scalar. + + The scalar to subtract from. + A new matrix containing the subtraction of the scalar and this matrix. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of this matrix with a scalar. + + The scalar to multiply with. + The result of the multiplication. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides each element of this matrix with a scalar. + + The scalar to divide with. + The result of the division. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides a scalar by each element of the matrix. + + The scalar to divide. + The result of the division. + + + + Divides a scalar by each element of the matrix and places results into the result matrix. + + The scalar to divide. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.ColumnCount != rightSide.Count. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.RowCount. + If this.ColumnCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ). + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.Rows. + If the result matrix's dimensions are not the this.Rows x other.Columns. + + + + Multiplies this matrix with another matrix and returns the result. 
+ + The matrix to multiply with. + If this.Columns != other.Rows. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with the conjugate transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the conjugate transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the conjugate transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Raises this square matrix to a positive integer exponent and places the results into the result matrix. + + The positive integer exponent to raise the matrix to. + The result of the power. + + + + Multiplies this square matrix with another matrix and returns the result. + + The positive integer exponent to raise the matrix to. + + + + Negate each element of this matrix. + + A matrix containing the negated values. + + + + Negate each element of this matrix and place the results into the result matrix. 
+ + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. 
+ + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
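Editor's note: a short sketch of the pointwise operations and the exp/log helpers listed above, contrasted with the ordinary matrix product. The operands are arbitrary; method names are the Math.NET Numerics pointwise API:

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });
    var B = Matrix<double>.Build.DenseOfArray(new[,] { { 2.0, 2.0 }, { 2.0, 2.0 } });

    var product   = A.PointwiseMultiply(B);   // element-wise (Hadamard) product
    var quotient  = A.PointwiseDivide(B);     // element-wise division
    var squared   = A.PointwisePower(2.0);    // each element raised to the power 2
    var exponents = A.PointwiseExp();         // exp applied to each element
    var logs      = A.PointwiseLog();         // natural log applied to each element

    // In contrast, the * operator is the ordinary matrix product.
    var matmul = A * B;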
+ + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Calculates the rank of the matrix. + + effective numerical rank, obtained from SVD + + + + Calculates the nullity of the matrix. + + effective numerical nullity, obtained from SVD + + + Calculates the condition number of this matrix. + The condition number of the matrix. + The condition number is calculated using singular value decomposition. + + + Computes the determinant of this matrix. + The determinant of this matrix. + + + + Computes an orthonormal basis for the null space of this matrix, + also known as the kernel of the corresponding matrix transformation. 
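Editor's note: the scalar characteristics documented here (trace, rank, nullity, condition number, determinant) are plain instance methods. A hedged sketch, with the numerical comments following from the chosen example matrix:

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 2.0, 0.0 },
        { 0.0, 3.0 }
    });

    double trace = A.Trace();            // sum of diagonal entries = 5
    double det = A.Determinant();        // = 6
    int rank = A.Rank();                 // effective numerical rank, obtained from SVD
    int nullity = A.Nullity();           // column count minus rank = 0 here
    double cond = A.ConditionNumber();   // max(S) / min(S) from the SVD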
+ + + + + Computes an orthonormal basis for the column space of this matrix, + also known as the range or image of the corresponding matrix transformation. + + + + Computes the inverse of this matrix. + The inverse of this matrix. + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + If the result matrix's dimensions are not (this.Rows * lower.rows) x (this.Columns * lower.Columns). + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + Calculates the induced L1 norm of this matrix. 
+ The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + For sparse matrices, the L2 norm is computed using a dense implementation of singular value decomposition. + In a later release, it will be replaced with a sparse implementation. + + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns a string that describes the type, dimensions and shape of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes this matrix. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Matrix class. + + + + + Gets the raw matrix data storage. + + + + + Gets the number of columns. + + The number of columns. + + + + Gets the number of rows. + + The number of rows. + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + + + + Sets the value of the given element without range checking. + + + The row of the element. + + + The column of the element. 
+ + + The value to set the element to. + + + + + Sets all values to zero. + + + + + Sets all values of a row to zero. + + + + + Sets all values of a column to zero. + + + + + Sets all values for all of the chosen rows to zero. + + + + + Sets all values for all of the chosen columns to zero. + + + + + Sets all values of a sub-matrix to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Creates a clone of this instance. + + + A clone of the instance. + + + + + Copies the elements of this matrix to the given matrix. + + + The matrix to copy values into. + + + If target is . + + + If this and the target matrix do not have the same dimensions.. + + + + + Copies a row into an Vector. + + The row to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of rows. + + + + Copies a row into to the given Vector. + + The row to copy. + The Vector to copy the row into. + If the result vector is . + If is negative, + or greater than or equal to the number of rows. + If this.Columns != result.Count. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of rows. + is negative, + or greater than or equal to the number of columns. + (columnIndex + length) >= Columns. + If is not positive. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Copies a column into a new Vector>. + + The column to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of columns. + + + + Copies a column into to the given Vector. + + The column to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If this.Rows != result.Count. + + + + Copies the requested column elements into a new Vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of columns. + is negative, + or greater than or equal to the number of rows. + (rowIndex + length) >= Rows. + + If is not positive. + + + + Copies the requested column elements into the given vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. 
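Editor's note: for the element, row and column accessors above, a minimal sketch (indices are zero-based; the indexer is range-checked, as the documentation notes):

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 1.0, 2.0, 3.0 },
        { 4.0, 5.0, 6.0 },
        { 7.0, 8.0, 9.0 }
    });

    double value = m[1, 2];                // range-checked get: 6
    m[0, 0] = 10.0;                        // range-checked set

    Vector<double> row = m.Row(0);         // copy of the first row
    Vector<double> col = m.Column(2);      // copy of the last column

    var clone = m.Clone();                 // independent copy of the whole matrix
    var upper = m.UpperTriangle();         // upper triangle, zeros below the diagonal
    m.ClearRow(1);                         // sets every value in row 1 to zero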
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Returns the elements of the diagonal in a Vector. + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a new matrix and inserts the given column at the given index. + + The index of where to insert the column. + The column to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of columns. + If the size of != the number of rows. + + + + Creates a new matrix with the given column removed. + + The index of the column to remove. + A new matrix without the chosen column. + If is < zero or >= the number of columns. + + + + Copies the values of the given Vector to the specified column. + + The column to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given Vector to the specified sub-column. + + The column to copy the values to. + The row to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given array to the specified column. + + The column to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + If the size of does not + equal the number of rows of this Matrix. 
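Editor's note: the sub-matrix and column-editing members above in use; a sketch under the assumption of the standard Math.NET Numerics signatures. Note that Insert/Remove return new matrices, while SetColumn overwrites in place:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 1.0, 2.0, 3.0 },
        { 4.0, 5.0, 6.0 },
        { 7.0, 8.0, 9.0 }
    });

    // 2x2 block starting at row 0, column 1.
    var block = m.SubMatrix(0, 2, 1, 2);

    Vector<double> diag = m.Diagonal();                        // 1, 5, 9
    var withExtra = m.InsertColumn(1, Vector<double>.Build.Dense(3, 0.0));
    var withoutFirst = m.RemoveColumn(0);                      // new matrix, one column fewer
    m.SetColumn(2, new[] { -1.0, -2.0, -3.0 });                // overwrites column 2 in place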
+ + + + Creates a new matrix and inserts the given row at the given index. + + The index of where to insert the row. + The row to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of rows. + If the size of != the number of columns. + + + + Creates a new matrix with the given row removed. + + The index of the row to remove. + A new matrix without the chosen row. + If is < zero or >= the number of rows. + + + + Copies the values of the given Vector to the specified row. + + The row to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given Vector to the specified sub-row. + + The row to copy the values to. + The column to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given array to the specified row. + + The row to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The column to start copying to. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The number of rows to copy. Must be positive. + The column to start copying to. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The row of the sub-matrix to start copying from. + The number of rows to copy. Must be positive. + The column to start copying to. + The column of the sub-matrix to start copying from. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of the given Vector to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). 
+ For non-square matrices, the elements of are copied to + this[i,i]. + + + + Returns the transpose of this matrix. + + The transpose of this matrix. + + + + Puts the transpose of this matrix into the result matrix. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + + + + Concatenates this matrix with the given matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Concatenates this matrix with the given matrix and places the result into the result matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Diagonally stacks his matrix on top of the given matrix. The new matrix is a M-by-N matrix, + where M = this.Rows + lower.Rows and N = this.Columns + lower.Columns. + The values of off the off diagonal matrices/blocks are set to zero. + + The lower, right matrix. + If lower is . + the combined matrix + + + + + + Diagonally stacks his matrix on top of the given matrix and places the combined matrix into the result matrix. + + The lower, right matrix. + The combined matrix + If lower is . + If the result matrix is . + If the result matrix's dimensions are not (this.Rows + lower.rows) x (this.Columns + lower.Columns). + + + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Evaluates whether this matrix is conjugate symmetric. + + + + + Returns this matrix as a multidimensional array. + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + + A multidimensional containing the values of this matrix. + + + + Returns the matrix's elements as an array with the data laid out column by column (column major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the matrix's elements as an array with the data laid row by row (row major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
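The To*Array members above always allocate an independent copy. A short sketch of the layouts they produce for the 3x3 example used in the documentation, assuming MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1, 2, 3 },
        { 4, 5, 6 },
        { 7, 8, 9 }
    });

    double[,] grid     = m.ToArray();            // independent two-dimensional copy
    double[]  colMajor = m.ToColumnMajorArray(); // 1, 4, 7, 2, 5, 8, 3, 6, 9
    double[]  rowMajor = m.ToRowMajorArray();    // 1, 2, 3, 4, 5, 6, 7, 8, 9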
+ + + Returns this matrix as array of row arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns this matrix as array of column arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns the internal multidimensional array of this matrix if, and only if, this matrix is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the matrix will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Returns the internal column by column (column major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row by row (row major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
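Unlike the To* copies, the As* members above return the internal storage only when the matrix is actually backed by that layout, and null otherwise. A common defensive pattern is to fall back to a copy; a sketch, assuming the AsColumnMajorArray/ToColumnMajorArray pair behaves as described:

    using MathNet.Numerics.LinearAlgebra;

    var dense = Matrix<double>.Build.Dense(3, 3, (i, j) => 3 * i + j + 1.0);

    // Shares memory with the matrix when it is stored dense column-major; otherwise null.
    double[] data = dense.AsColumnMajorArray() ?? dense.ToColumnMajorArray();
    data[0] = 99.0; // visible through 'dense' only if the internal array was returned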
+ + + Returns the internal row arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowArrays instead if you always need an independent array. + + + + + Returns the internal column arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnArrays instead if you always need an independent array. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix. + + The column to start enumerating over. + The number of columns to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix and their index. + + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix and their index. + + The column to start enumerating over. + The number of columns to enumerating over. + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix. + + The row to start enumerating over. + The number of rows to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix and their index. 
+ + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix and their index. + + The row to start enumerating over. + The number of rows to enumerating over. + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Applies a function to each value of this matrix and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value with its result. + The row and column indices of each value (zero-based) are passed as first arguments to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + For each row, applies a function f to each element of the row, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each row. + + + + + For each column, applies a function f to each element of the column, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each column. + + + + + Applies a function f to each row vector, threading an accumulator vector argument through the computation. 
+ Returns the resulting accumulator vector. + + + + + Applies a function f to each column vector, threading an accumulator vector argument through the computation. + Returns the resulting accumulator vector. + + + + + Reduces all row vectors by applying a function between two of them, until only a single vector is left. + + + + + Reduces all column vectors by applying a function between two of them, until only a single vector is left. + + + + + Applies a function to each value pair of two matrices and replaces the value in the result vector. + + + + + Applies a function to each value pair of two matrices and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two matrices and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two matrices of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element pairs of two matrices of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two matrices of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to add. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to add. + The right matrix to add. + The result of the addition. + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Subtracts a scalar from each element of a matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. 
+ The left matrix to subtract. + The scalar value to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Substracts each element of a matrix from a scalar. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Divides a scalar with a matrix. + + The scalar to divide. + The matrix. + The result of the division. + If is . + + + + Divides a matrix with a scalar. + + The matrix to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of the matrix of the given divisor. + + The matrix whose elements we want to compute the modulus of. + The divisor to use. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the matrix. + + The dividend we want to compute the modulus of. + The matrix whose elements we want to use as divisor. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two matrices. + + The matrix whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
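The arithmetic operators, Map and the Enumerate* members described above combine freely. A minimal sketch, assuming MathNet.Numerics 3.x (where EnumerateIndexed yields Tuple<int, int, double> entries):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var a = Matrix<double>.Build.Dense(2, 2, (i, j) => i + j + 1.0);
    var b = Matrix<double>.Build.DenseIdentity(2);

    var sum      = a + b;                   // element-wise sum
    var scaled   = 2.0 * a;                 // scalar multiplication
    var product  = a * b;                   // matrix product
    var squared  = a.Map(x => x * x);       // new matrix, element-wise function
    var hadamard = a.PointwiseMultiply(b);  // element-wise product

    foreach (var entry in a.EnumerateIndexed())  // (row, column, value) tuples
        Console.WriteLine("[{0},{1}] = {2}", entry.Item1, entry.Item2, entry.Item3);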
+ + + + Computes the sqrt of a matrix pointwise + + The input matrix + + + + + Computes the exponential of a matrix pointwise + + The input matrix + + + + + Computes the log of a matrix pointwise + + The input matrix + + + + + Computes the log10 of a matrix pointwise + + The input matrix + + + + + Computes the sin of a matrix pointwise + + The input matrix + + + + + Computes the cos of a matrix pointwise + + The input matrix + + + + + Computes the tan of a matrix pointwise + + The input matrix + + + + + Computes the asin of a matrix pointwise + + The input matrix + + + + + Computes the acos of a matrix pointwise + + The input matrix + + + + + Computes the atan of a matrix pointwise + + The input matrix + + + + + Computes the sinh of a matrix pointwise + + The input matrix + + + + + Computes the cosh of a matrix pointwise + + The input matrix + + + + + Computes the tanh of a matrix pointwise + + The input matrix + + + + + Computes the absolute value of a matrix pointwise + + The input matrix + + + + + Computes the floor of a matrix pointwise + + The input matrix + + + + + Computes the ceiling of a matrix pointwise + + The input matrix + + + + + Computes the rounded value of a matrix pointwise + + The input matrix + + + + + Computes the Cholesky decomposition for a matrix. + + The Cholesky decomposition object. + + + + Computes the LU decomposition for a matrix. + + The LU decomposition object. + + + + Computes the QR decomposition for a matrix. + + The type of QR factorization to perform. + The QR decomposition object. + + + + Computes the QR decomposition for a matrix using Modified Gram-Schmidt Orthogonalization. + + The QR decomposition object. + + + + Computes the SVD decomposition for a matrix. + + Compute the singular U and VT vectors or not. + The SVD decomposition object. + + + + Computes the EVD decomposition for a matrix. + + The EVD decomposition object. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. 
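The factorizations listed above all hang off the matrix itself, and each decomposition object exposes its own Solve members. A sketch of the direct-solver path, assuming MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
    var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

    var x1   = A.Solve(b);        // picks a suitable direct factorization internally
    var x2   = A.LU().Solve(b);   // explicit LU factorization
    var x3   = A.QR().Solve(b);   // explicit QR (also handles overdetermined systems)
    var chol = A.Cholesky();      // requires a symmetric positive-definite matrix
    var svd  = A.Svd();           // singular values available via svd.S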
+ + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The result matrix X. + + + + Converts a matrix to single precision. + + + + + Converts a matrix to double precision. + + + + + Converts a matrix to single precision complex numbers. + + + + + Converts a matrix to double precision complex numbers. + + + + + Gets a single precision complex matrix with the real parts from the given matrix. + + + + + Gets a double precision complex matrix with the real parts from the given matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. 
+ + + + + Existing data may not be all zeros, so clearing may be necessary + if not all of it will be overwritten anyway. + + + + + If existing data is assumed to be all zeros already, + clearing it may be skipped if applicable. + + + + + Allow skipping zero entries (without enforcing skipping them). + When enumerating sparse matrices this can significantly speed up operations. + + + + + Force applying the operation to all fields even if they are zero. + + + + + It is not known yet whether a matrix is symmetric or not. + + + + + A matrix is symmetric + + + + + A matrix is hermitian (conjugate symmetric). + + + + + A matrix is not symmetric + + + + + Defines an that uses a cancellation token as stop criterion. + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Stop criterion that delegates the status determination to a delegate. + + + + + Create a new instance of this criterion with a custom implementation. + + Custom implementation with the same signature and semantics as the DetermineStatus method. + + + + Determines the status of the iterative calculation by delegating it to the provided delegate. + Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + + + + Clones this criterion and its settings. + + + + + Monitors an iterative calculation for signs of divergence. + + + + + The maximum relative increase the residual may experience without triggering a divergence warning. + + + + + The number of iterations over which a residual increase should be tracked before issuing a divergence warning. + + + + + The status of the calculation + + + + + The array that holds the tracking information. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified maximum + relative increase and the specified minimum number of tracking iterations. + + The maximum relative increase that the residual may experience before a divergence warning is issued. + The minimum number of iterations over which the residual must grow before a divergence warning is issued. + + + + Gets or sets the maximum relative increase that the residual may experience before a divergence warning is issued. + + Thrown if the Maximum is set to zero or below. 
+ + + + Gets or sets the minimum number of iterations over which the residual must grow before + issuing a divergence warning. + + Thrown if the value is set to less than one. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Detect if solution is diverging + + true if diverging, otherwise false + + + + Gets required history Length + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Defines an that monitors residuals for NaN's. + + + + + The status of the calculation + + + + + The iteration number of the last iteration. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + The base interface for classes that provide stop criteria for iterative calculations. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current IIterationStopCriterion. Status is set to Status field of current object. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + is not a legal value. Status should be set in implementation. + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + To implementers: Invoking this method should not clear the user defined + property values, only the state that is used to track the progress of the + calculation. + + + + Defines the interface for classes that solve the matrix equation Ax = b in + an iterative manner. + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Defines the interface for objects that can create an iterative solver with + specific settings. 
This interface is used to pass iterative solver creation + setup information around. + + + + + Gets the type of the solver that will be created by this setup object. + + + + + Gets type of preconditioner, if any, that will be created by this setup object. + + + + + Creates the iterative solver to be used. + + + + + Creates the preconditioner to be used by default (can be overwritten). + + + + + Gets the relative speed of the solver. + + Returns a value between 0 and 1, inclusive. + + + + Gets the relative reliability of the solver. + + Returns a value between 0 and 1 inclusive. + + + + The base interface for preconditioner classes. + + + + Preconditioners are used by iterative solvers to improve the convergence + speed of the solving process. Increase in convergence speed + is related to the number of iterations necessary to get a converged solution. + So while in general the use of a preconditioner means that the iterative + solver will perform fewer iterations it does not guarantee that the actual + solution time decreases given that some preconditioners can be expensive to + setup and run. + + + Note that in general changes to the matrix will invalidate the preconditioner + if the changes occur after creating the preconditioner. + + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix on which the preconditioner is based. + + + + Approximates the solution to the matrix equation Mx = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Defines an that monitors the numbers of iteration + steps as stop criterion. + + + + + The default value for the maximum number of iterations the process is allowed + to perform. + + + + + The maximum number of iterations the calculation is allowed to perform. + + + + + The status of the calculation + + + + + Initializes a new instance of the class with the default maximum + number of iterations. + + + + + Initializes a new instance of the class with the specified maximum + number of iterations. + + The maximum number of iterations the calculation is allowed to perform. + + + + Gets or sets the maximum number of iterations the calculation is allowed to perform. + + Thrown if the Maximum is set to a negative value. + + + + Returns the maximum number of iterations to the default. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Iterative Calculation Status + + + + + An iterator that is used to check if an iterative calculation should continue or stop. + + + + + The collection that holds all the stop criteria and the flag indicating if they should be added + to the child iterators. + + + + + The status of the iterator. + + + + + Initializes a new instance of the class with the default stop criteria. 
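Putting the iterative pieces above together means pairing a solver with an Iterator built from stop criteria and, optionally, a preconditioner. A sketch, assuming MathNet.Numerics 3.x, where BiCgStab is one IIterativeSolver<double> implementation in MathNet.Numerics.LinearAlgebra.Double.Solvers and UnitPreconditioner<T> is the no-op preconditioner described further below:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers; // solver location is an assumption of 3.x layout

    var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
    var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });
    var x = Vector<double>.Build.Dense(2);

    // Stop after 1000 iterations or once the residual is small enough, whichever comes first.
    var iterator = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10));

    new BiCgStab().Solve(A, b, x, iterator, new UnitPreconditioner<double>());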
+ + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Gets the current calculation status. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual iterators may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Indicates to the iterator that the iterative process has been cancelled. + + + Does not reset the stop-criteria. + + + + + Resets the to the pre-calculation state. + + + + + Creates a deep clone of the current iterator. + + The deep clone of the current iterator. + + + + Defines an that monitors residuals as stop criterion. + + + + + The maximum value for the residual below which the calculation is considered converged. + + + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + The status of the calculation + + + + + The number of iterations since the residuals got below the maximum. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified + maximum residual and minimum number of iterations. + + + The maximum value for the residual below which the calculation is considered converged. + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + Gets or sets the maximum value for the residual below which the calculation is considered + converged. + + Thrown if the Maximum is set to a negative value. + + + + Gets or sets the minimum number of iterations for which the residual has to be + below the maximum before the calculation is considered converged. + + Thrown if the BelowMaximumFor is set to a value less than 1. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Loads the available objects from the specified assembly. + + The assembly which will be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. 
+ The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The type in the assembly which should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The of the assembly that should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + + + + A unit preconditioner. This preconditioner does not actually do anything + it is only used when running an without + a preconditioner. + + + + + The coefficient matrix on which this preconditioner operates. + Is used to check dimensions on the different vectors that are processed. + + + + + Initializes the preconditioner and loads the internal data structures. + + + The matrix upon which the preconditioner is based. + + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + If and do not have the same size. + + + - or - + + + If the size of is different the number of rows of the coefficient matrix. + + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Evaluate the row and column at a specific data index. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. 
+ + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + + The array containing the row indices of the existing rows. Element "i" of the array gives the index of the + element in the array that is first non-zero element in a row "i". + The last value is equal to ValueCount, so that the number of non-zero entries in row "i" is always + given by RowPointers[i+i] - RowPointers[i]. This array thus has length RowCount+1. + + + + + An array containing the column indices of the non-zero values. Element "j" of the array + is the number of the column in matrix that contains the j-th value in the array. + + + + + Array that contains the non-zero elements of matrix. Values of the non-zero elements of matrix are mapped into the values + array using the row-major storage mapping described in a compressed sparse row (CSR) format. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Delete value from internal storage + + Index of value in nonZeroValues array + Row number of matrix + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Find item Index in nonZeroValues array + + Matrix row index + Matrix column index + Item index + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
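The compressed sparse row layout described above can be inspected directly when a matrix is sparse. A sketch, assuming the matrix is backed by SparseCompressedRowMatrixStorage<double> and that the RowPointers/ColumnIndices/Values arrays are exposed as public members, as in MathNet.Numerics 3.x:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Storage;

    var s = Matrix<double>.Build.SparseOfArray(new double[,]
    {
        { 1, 0, 2 },
        { 0, 0, 3 },
        { 4, 5, 0 }
    });

    var csr = (SparseCompressedRowMatrixStorage<double>)s.Storage;
    // The number of non-zero entries in row i is RowPointers[i + 1] - RowPointers[i].
    for (int i = 0; i < s.RowCount; i++)
        Console.WriteLine("row {0}: {1} non-zeros", i, csr.RowPointers[i + 1] - csr.RowPointers[i]);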
+ + + + + Array that contains the indices of the non-zero values. + + + + + Array that contains the non-zero elements of the vector. + + + + + Gets the number of non-zero elements in the vector. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the vector storage format is dense. + + + + + Gets or sets the value at the given index, with range checking. + + + The index of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + The index of the element. + The requested element. + Not range-checked. + + + + Sets the element without range checking. + + The index of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + + Defines the generic class for Vector classes. + + Supported data types are double, single, , and . + + + + The zero value for type T. + + + + + The value of 1.0 for type T. + + + + + Negates vector and save result to + + Target vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. 
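The vector arithmetic and dot-product members above in a minimal sketch, assuming MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

    var sum  = u + v;            // [5, 7, 9]
    var dot  = u.DotProduct(v);  // 1*4 + 2*5 + 3*6 = 32
    var axpy = 2.0 * u - v;      // scalar multiply, then subtract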
+ + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar denominator to use. + The vector to store the result of the division. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar numerator to use. + The vector to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Adds a scalar to each element of the vector. + + The scalar to add. + A copy of the vector with the scalar added. + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + If this vector and are not the same size. + + + + Adds another vector to this vector. + + The vector to add to this one. + A new vector containing the sum of both vectors. + If this vector and are not the same size. 
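The outer product and pointwise vector members above, sketched with the 3.x names PointwiseMultiply, PointwisePower and PointwiseLog; the exact member names are an assumption where the documentation text does not spell them out:

    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

    var outer    = u.OuterProduct(v);       // 3x3 matrix, outer[i, j] = u[i] * v[j]
    var hadamard = u.PointwiseMultiply(v);  // [4, 10, 18]
    var powers   = u.PointwisePower(2.0);   // [1, 4, 9]      (name assumed from 3.x)
    var logs     = v.PointwiseLog();        // natural log of each element (name assumed from 3.x)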
+ + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Subtracts a scalar from each element of the vector. + + The scalar to subtract. + A new vector containing the subtraction of this vector and the scalar. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Subtracts each element of the vector from a scalar. + + The scalar to subtract from. + A new vector containing the subtraction of the scalar and this vector. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Returns a negated vector. + + The negated vector. + Added as an alternative to the unary negation operator. + + + + Negates vector and save result to + + Target vector + + + + Subtracts another vector from this vector. + + The vector to subtract from this one. + A new vector containing the subtraction of the the two vectors. + If this vector and are not the same size. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Return vector with complex conjugate values of the source vector + + Conjugated vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector. + + The scalar to multiply. + A new vector that is the multiplication of the vector and the scalar. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + If this vector and are not the same size. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + If is not of the same size. + + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + If is not of the same size. + If is . + + + + + Divides each element of the vector by a scalar. + + The scalar to divide with. + A new vector that is the division of the vector and the scalar. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar to divide with. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Divides a scalar by each element of the vector. + + The scalar to divide. + A new vector that is the division of the vector and the scalar. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. 
+ A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector containing the result. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector. + + The vector to pointwise multiply with this one. + A new vector which is the pointwise multiplication of the two vectors. + If this vector and are not the same size. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector. + + The pointwise denominator vector to use. + A new vector which is the pointwise division of the two vectors. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise division. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The matrix to store the result into. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + The vector to store the result into. + If this vector and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. 
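The distinction drawn above, canonical modulus taking the sign of the divisor versus remainder taking the sign of the dividend, only shows up with negative operands. A sketch, assuming the 3.x member names Modulus and Remainder for the scalar-divisor overloads described here:

    using MathNet.Numerics.LinearAlgebra;

    var w = Vector<double>.Build.DenseOfArray(new[] { -7.0, 7.0 });

    var mod = w.Modulus(3.0);    // canonical modulus, sign of the divisor:  [2, 1]  (name assumed)
    var rem = w.Remainder(3.0);  // % semantics, sign of the dividend:      [-1, 1]  (name assumed)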
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise modulus. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise remainder. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Helper function to apply a unary function to a vector. The function + f modifies the vector given to it in place. Before its + called, a copy of the 'this' vector with the same dimension is + first created, then passed to f. The copy is returned as the result + + Function which takes a vector, modifies it in place and returns void + New instance of vector which is the result + + + + Helper function to apply a unary function which modifies a vector + in place. + + Function which takes a vector, modifies it in place and returns void + The vector where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes a scalar and + a vector and modifies the latter in place. A copy of the "this" + vector is therefore first made and then passed to f together with + the scalar argument. The copy is then returned as the result + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + The resulting vector + + + + Helper function to apply a binary function which takes a scalar and + a vector, modifies the latter in place and returns void. + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the latter in place. A copy of the "this" vector is + first made and then passed to f together with the other vector. The + copy is then returned as the result + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the second one in place + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The vector to store the result. 
+ If this vector and are not the same size. + + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector. + + The other vector + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. 
+ + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = (sum(abs(this[i])^p))^(1/p) + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + The p value. + This vector normalized to a unit vector with respect to the p-norm. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the value of maximum element. + + The value of maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the value of the minimum element. + + The value of the minimum element. + + + + Returns the index of the minimum element. + + The index of minimum element. 
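Editor's note: a minimal sketch of the norm and extremum members described above, using the generic Vector<double> API (values invented for illustration):

using MathNet.Numerics.LinearAlgebra;

var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0, 12.0 });

double l1   = v.L1Norm();          // 19, Manhattan norm (sum of absolute values)
double l2   = v.L2Norm();          // 13, Euclidean norm
double linf = v.InfinityNorm();    // 12, maximum absolute value
double p3   = v.Norm(3.0);         // general p-norm
var unit    = v.Normalize(2.0);    // scaled to unit length in the 2-norm

double max      = v.Maximum();         // 12
int    minIndex = v.MinimumIndex();    // 1 (the -4 entry)
double absMax   = v.AbsoluteMaximum(); // 12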
+ + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Computes the sum of the absolute value of the vector's elements. + + The sum of the absolute value of the vector's elements. + + + + Indicates whether the current object is equal to another object of the same type. + + An object to compare with this object. + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns an enumerator that iterates through the collection. + + + A that can be used to iterate through the collection. + + + + + Returns an enumerator that iterates through a collection. + + + An object that can be used to iterate through the collection. + + + + + Returns a string that describes the type, dimensions and shape of this vector. + + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Character to use to print if there is not enough space to print all entries. Typical value: "..". + Character to use to separate two coluns on a line. Typical value: " " (2 spaces). + Character to use to separate two rows/lines. Typical value: Environment.NewLine. + Function to provide a string for any given entry value. + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that represents the content of this vector, column by column. + + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector, column by column and with a type header. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Vector class. + + + + + Gets the raw vector data storage. + + + + + Gets the length or number of dimensions of this vector. + + + + Gets or sets the value at the given . + The index of the value to get or set. + The value of the vector at the given . + If is negative or + greater than the size of the vector. + + + Gets the value at the given without range checking.. + The index of the value to get or set. + The value of the vector at the given . + + + Sets the at the given without range checking.. + The index of the value to get or set. + The value to set. 
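Editor's note: a short sketch of the element-access and summation members documented above (checked indexer versus the unchecked At accessor); sample values are invented:

using System;
using MathNet.Numerics.LinearAlgebra;

var v = Vector<double>.Build.Dense(5, i => i * i);   // { 0, 1, 4, 9, 16 }

double total = v.Sum();             // 30
double abs   = v.SumMagnitudes();   // sum of absolute values
double x2    = v[2];                // range-checked indexer
double x3    = v.At(3);             // unchecked read, no bounds validation
v.At(4, 25.0);                      // unchecked write
Console.WriteLine(v.ToString());    // type/shape header plus formatted values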
+ + + + Resets all values to zero. + + + + + Sets all values of a subvector to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Returns a deep-copy clone of the vector. + + A deep-copy clone of the vector. + + + + Set the values of this vector to the given values. + + The array containing the values to use. + If is . + If is not the same size as this vector. + + + + Copies the values of this vector into the target vector. + + The vector to copy elements into. + If is . + If is not the same size as this vector. + + + + Creates a vector containing specified elements. + + The first element to begin copying from. + The number of elements to copy. + A vector containing a copy of the specified elements. + If is not positive or + greater than or equal to the size of the vector. + If + is greater than or equal to the size of the vector. + + If is not positive. + + + + Copies the values of a given vector into a region in this vector. + + The field to start copying to + The number of fields to cpy. Must be positive. + The sub-vector to copy from. + If is + + + + Copies the requested elements from this vector to another. + + The vector to copy the elements to. + The element to start copying from. + The element to start copying to. + The number of elements to copy. + + + + Returns the data contained in the vector as an array. + The returned array will be independent from this vector. + A new memory block will be allocated for the array. + + The vector's data as an array. + + + + Returns the internal array of this vector if, and only if, this vector is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the vector will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Create a matrix based on this vector in column form (one single column). + + + This vector as a column matrix. + + + + + Create a matrix based on this vector in row form (one single row). + + + This vector as a row matrix. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. 
+ The enumerator will skip all elements with a zero value. + + + + + Applies a function to each value of this vector and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value with its result. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and returns the results as a new vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and returns the results as a new vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value pair of two vectors and replaces the value in the result vector. + + + + + Applies a function to each value pair of two vectors and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two vectors and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two vectors of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). 
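Editor's note: a hedged sketch of the copy, map and predicate members described above (SubVector, ToArray, Map/MapInplace, EnumerateIndexed, Exists, ForAll); the data is invented:

using System;
using MathNet.Numerics.LinearAlgebra;

var v = Vector<double>.Build.DenseOfArray(new[] { 1.0, -2.0, 3.0, 0.0 });

var part         = v.SubVector(1, 2);          // copy of elements 1..2: { -2, 3 }
double[] arr     = v.ToArray();                // independent array copy
var squared      = v.Map(x => x * x);          // new vector with the function applied to each value
bool anyNegative = v.Exists(x => x < 0.0);     // true: -2 satisfies the predicate
bool allFinite   = v.ForAll(x => !double.IsNaN(x));
v.MapInplace(Math.Abs);                        // in-place variant: v is now { 1, 2, 3, 0 }

foreach (var pair in v.EnumerateIndexed())     // (index, value) tuples, zeros included
    Console.WriteLine("{0}: {1}", pair.Item1, pair.Item2);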
+ + + + + Returns true if at least one element pairs of two vectors of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two vectors of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Vector containing the same values of . + + This method is included for completeness. + The vector to get the values from. + A vector containing the same values as . + If is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Adds a scalar to each element of a vector. + + The vector to add to. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of a vector. + + The scalar value to add. + The vector to add to. + The result of the addition. + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of a vector. + + The vector to subtract from. + The scalar value to subtract. + The result of the subtraction. + If is . + + + + Substracts each element of a vector from a scalar. + + The scalar value to subtract from. + The vector to subtract. + The result of the subtraction. + If is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a scalar with a vector. + + The scalar to divide. + The vector. + The result of the division. + If is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Pointwise divides two Vectors. + + The vector to divide. + The other vector. + The result of the division. + If and are not the same size. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the vector. + + The dividend we want to compute the remainder of. + The vector whose elements we want to use as divisor. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two vectors. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
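Editor's note: the operator overloads documented above mirror the named methods; per these docs, the binary * of two vectors is the dot product and % is the element-wise remainder. A brief illustrative sketch:

using MathNet.Numerics.LinearAlgebra;

var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
var w = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

var sum    = u + w;       // element-wise addition
var diff   = u - w;       // element-wise subtraction
var twice  = 2.0 * u;     // scalar multiplication
var half   = u / 2.0;     // scalar division
var rem    = u % 2.0;     // remainder, sign follows the dividend
double dot = u * w;       // vector * vector yields the dot product (32 here)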
+ + + + Computes the sqrt of a vector pointwise + + The input vector + + + + + Computes the exponential of a vector pointwise + + The input vector + + + + + Computes the log of a vector pointwise + + The input vector + + + + + Computes the log10 of a vector pointwise + + The input vector + + + + + Computes the sin of a vector pointwise + + The input vector + + + + + Computes the cos of a vector pointwise + + The input vector + + + + + Computes the tan of a vector pointwise + + The input vector + + + + + Computes the asin of a vector pointwise + + The input vector + + + + + Computes the acos of a vector pointwise + + The input vector + + + + + Computes the atan of a vector pointwise + + The input vector + + + + + Computes the sinh of a vector pointwise + + The input vector + + + + + Computes the cosh of a vector pointwise + + The input vector + + + + + Computes the tanh of a vector pointwise + + The input vector + + + + + Computes the absolute value of a vector pointwise + + The input vector + + + + + Computes the floor of a vector pointwise + + The input vector + + + + + Computes the ceiling of a vector pointwise + + The input vector + + + + + Computes the rounded value of a vector pointwise + + The input vector + + + + + Converts a vector to single precision. + + + + + Converts a vector to double precision. + + + + + Converts a vector to single precision complex numbers. + + + + + Converts a vector to double precision complex numbers. + + + + + Gets a single precision complex vector with the real parts from the given vector. + + + + + Gets a double precision complex vector with the real parts from the given vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response vector Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response matrix Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. 
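Editor's note: the regression helpers above take a predictor matrix X and a response Y and return the least-squares parameters β. A hedged sketch using MultipleRegression from MathNet.Numerics.LinearRegression (the sample data is invented, and the intercept is modelled here by an explicit column of ones):

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearRegression;

// One row per observation; the first column of ones models the intercept.
var X = Matrix<double>.Build.DenseOfArray(new[,]
{
    { 1.0, 1.0 },
    { 1.0, 2.0 },
    { 1.0, 3.0 },
    { 1.0, 4.0 },
});
var y = Vector<double>.Build.DenseOfArray(new[] { 6.1, 7.9, 10.2, 11.8 });

// Normal equations (Cholesky): fast, but less robust when X is ill-conditioned.
Vector<double> beta = MultipleRegression.NormalEquations(X, y);   // beta roughly { 4, 2 }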
+ + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. 
+ Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor (independent) + Response (dependent) + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor-Response samples as tuples + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response matrix Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Weighted Linear Regression using normal equations. + + List of sample vectors (predictor) together with their response. + List of weights, one for each sample. + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Locally-Weighted Linear Regression using normal equations. + + + + + Locally-Weighted Linear Regression using normal equations. 
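Editor's note: for the common straight-line case, Fit.Line wraps the same least-squares machinery and, in MathNet.Numerics 3.x, returns an (intercept, slope) tuple. A hedged sketch with invented sample points:

using System;
using MathNet.Numerics;
using MathNet.Numerics.LinearRegression;

double[] x = { 1.0, 2.0, 3.0, 4.0, 5.0 };
double[] y = { 2.1, 3.9, 6.2, 8.1, 9.8 };

// Least-squares line y = a + b*x.
Tuple<double, double> line = Fit.Line(x, y);
double intercept = line.Item1;   // a
double slope     = line.Item2;   // b

// As noted above, the QR and SVD overloads (e.g. MultipleRegression.QR, MultipleRegression.Svd)
// trade speed for numerical stability compared with the normal equations.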
+ + + + + First Order AB method(same as Forward Euler) + + Initial value + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Second Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Third Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Fourth Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + ODE Solver Algorithms + + + + + Second Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Second Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Class to represent a permutation for a subset of the natural numbers. + + + + + Entry _indices[i] represents the location to which i is permuted to. + + + + + Initializes a new instance of the Permutation class. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + + + + Gets the number of elements this permutation is over. + + + + + Computes where permutes too. + + The index to permute from. + The index which is permuted to. + + + + Computes the inverse of the permutation. + + The inverse of the permutation. + + + + Construct an array from a sequence of inversions. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + The set of inversions to construct the permutation from. + A permutation generated from a sequence of inversions. + + + + Construct a sequence of inversions from the permutation. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + A sequence of inversions. + + + + Checks whether the array represents a proper permutation. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + True if represents a proper permutation, false otherwise. + + + + Utilities for working with floating point numbers. + + + + Useful links: + + + http://docs.sun.com/source/806-3568/ncg_goldberg.html#689 - What every computer scientist should know about floating-point arithmetic + + + http://en.wikipedia.org/wiki/Machine_epsilon - Gives the definition of machine epsilon + + + + + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. 
+ The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The relative accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The maximum error in terms of Units in Last Place (ulps), i.e. the maximum number of decimals that may be different. Must be 1 or larger. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. 
+ + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. 
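Editor's note: the comparison helpers documented here live in MathNet.Numerics.Precision as extension methods on double. A minimal sketch, assuming the overload names AlmostEqual and CompareTo-with-tolerance as exposed by recent MathNet.Numerics releases (treat the exact names as an assumption):

using MathNet.Numerics;   // Precision extension methods

double a = 1.0000001;
double b = 1.0000002;

bool eqAbs = a.AlmostEqual(b, 1e-6);   // equal within a maximum absolute error
bool eqDec = a.AlmostEqual(b, 6);      // equal to 6 decimal places

// Three-way comparison with a tolerance: -1 (a < b), 0 (almost equal), +1 (a > b).
int order = a.CompareTo(b, 1e-6);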
+ + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + The number of binary digits used to represent the binary number for a double precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + The number of binary digits used to represent the binary number for a single precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. 
Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Actual double precision machine epsilon, the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + On a standard machine this is equivalent to `DoublePrecision`. + + + + + Actual double precision machine epsilon, the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + On a standard machine this is equivalent to `PositiveDoublePrecision`. + + + + + The number of significant decimal places of double-precision floating numbers (64 bit). + + + + + The number of significant decimal places of single-precision floating numbers (32 bit). + + + + + Value representing 10 * 2^(-53) = 1.11022302462516E-15 + + + + + Value representing 10 * 2^(-24) = 5.96046447753906E-07 + + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the number divided by it's magnitude, effectively returning a number between -10 and 10. + + The value. + The value of the number. + + + + Returns a 'directional' long value. This is a long value which acts the same as a double, + e.g. a negative double value will return a negative double value starting at 0 and going + more negative as the double value gets more negative. + + The input double value. + A long value which is roughly the equivalent of the double value. + + + + Returns a 'directional' int value. This is a int value which acts the same as a float, + e.g. a negative float value will return a negative int value starting at 0 and going + more negative as the float value gets more negative. + + The input float value. + An int value which is roughly the equivalent of the double value. + + + + Increments a floating point number to the next bigger number representable by the data type. + + The value which needs to be incremented. + How many times the number should be incremented. + + The incrementation step length depends on the provided value. + Increment(double.MaxValue) will return positive infinity. + + The next larger floating point value. + + + + Decrements a floating point number to the next smaller number representable by the data type. + + The value which should be decremented. + How many times the number should be decremented. + + The decrementation step length depends on the provided value. + Decrement(double.MinValue) will return negative infinity. + + The next smaller floating point value. + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + Thrown if is smaller than zero. 
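Editor's note: a hedged sketch of the CoerceZero and Increment/Decrement helpers described above (extension methods in MathNet.Numerics.Precision; the thresholds below are examples, not recommendations):

using MathNet.Numerics;

double noise = 1e-20;
double clean = noise.CoerceZero(1e-15);   // below the threshold, forced to exactly 0.0
double kept  = (0.5).CoerceZero(1e-15);   // above the threshold, returned unchanged

double next = 1.0.Increment();            // next representable double above 1.0
double prev = 1.0.Decrement();            // next representable double below 1.0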
+ + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. 
See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. 
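Editor's note: the AlmostEqual overloads above accept a maximum error and exist for both double and System.Numerics.Complex values. A minimal sketch, assuming the extension-method form (illustrative values only):

using System.Numerics;
using MathNet.Numerics;

double x = 0.1 + 0.2;
bool closeEnough = x.AlmostEqual(0.3, 1e-12);   // within a maximum absolute error

var c1 = new Complex(1.0, 2.0);
var c2 = new Complex(1.0, 2.0 + 1e-14);
bool sameComplex = c1.AlmostEqual(c2, 1e-12);   // complex overload documented above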
+ + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + Thrown if is smaller than zero. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. 
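A sketch of the "equal to N decimal places" rule spelled out above: the difference must stay below half a unit in the last requested decimal place. The helper name is hypothetical.

using System;

static class DecimalPlacesSketch
{
    public static bool AlmostEqualToDecimalPlaces(double a, double b, int decimalPlaces)
    {
        if (decimalPlaces < 0) throw new ArgumentOutOfRangeException(nameof(decimalPlaces));
        double halfUnit = 0.5 * Math.Pow(10.0, -decimalPlaces);
        return Math.Abs(a - b) < halfUnit;
    }
}

// With decimalPlaces = 2 the threshold is 0.005, so 0.010 vs 0.013 compares equal
// while 0.010 vs 0.020 does not, matching the remark above.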
If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + + + Determines the 'number' of floating point numbers between two values (i.e. the number of discrete steps + between the two numbers) and then checks if that is within the specified tolerance. So if a tolerance + of 1 is passed then the result will be true only if the two numbers have the same binary representation + OR if they are two adjacent numbers that only differ by one step. + + + The comparison method used is explained in http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm . 
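The binary-representation comparison described above counts how many representable doubles lie between two values. A managed-code sketch of that idea, using the signed-magnitude to ordered-integer mapping from the cited article so that adjacent doubles always differ by exactly one step (names are illustrative):

using System;

static class UlpComparisonSketch
{
    // Two equal numbers give a distance of 0; adjacent numbers give 1.
    public static bool AlmostEqualNumbersBetween(double a, double b, long maxNumbersBetween)
    {
        if (maxNumbersBetween < 1) throw new ArgumentOutOfRangeException(nameof(maxNumbersBetween));
        if (double.IsNaN(a) || double.IsNaN(b)) return false;
        if (double.IsInfinity(a) || double.IsInfinity(b)) return a == b;

        long ua = ToOrderedBits(a);
        long ub = ToOrderedBits(b);
        return Math.Abs(ua - ub) <= maxNumbersBetween;
    }

    // Maps the IEEE-754 sign-magnitude bit pattern onto a lexicographically ordered integer line,
    // so negative doubles sort below zero and stepping the integer steps the double.
    static long ToOrderedBits(double value)
    {
        long bits = BitConverter.DoubleToInt64Bits(value);
        return bits >= 0 ? bits : long.MinValue - bits;
    }
}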
The article + at http://www.extremeoptimization.com/resources/Articles/FPDotNetConceptsAndFormats.aspx explains how to transform the C code to + .NET enabled code without using pointers and unsafe code. + + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two floats and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. 
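The list overloads documented above reduce to an element-wise check over two sequences of the same length. A minimal LINQ sketch with an illustrative helper name:

using System;
using System.Collections.Generic;
using System.Linq;

static class ListComparisonSketch
{
    // True when both lists have the same length and every pair of elements is within the
    // given absolute tolerance, mirroring the list overloads described above.
    public static bool ListAlmostEqual(IList<double> a, IList<double> b, double maximumError)
        => a.Count == b.Count
           && a.Zip(b, (x, y) => Math.Abs(x - y) <= maximumError).All(ok => ok);
}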
+ + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two vectors and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Support Interface for Precision Operations (like AlmostEquals). + + Type of the implementing class. + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + A norm of this value. + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + The value to compare with. + A norm of the difference between this and the other value. + + + + Consistency vs. performance trade-off between runs on different machines. 
+ + + + Consistent on the same CPU only (maximum performance) + + + Consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility) + + + Consistent on Intel CPUs supporting SSE2 or later + + + Consistent on Intel CPUs supporting SSE4.2 or later + + + Consistent on Intel CPUs supporting AVX or later + + + Consistent on Intel CPUs supporting AVX2 or later + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsFFTProvider" environment variable, + or fall back to the best provider. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + How to transpose a matrix. + + + + + Don't transpose a matrix. + + + + + Transpose a matrix. + + + + + Conjugate transpose a complex matrix. + + If a conjugate transpose is used with a real matrix, then the matrix is just transposed. + + + + Types of matrix norms. + + + + + The 1-norm. + + + + + The Frobenius norm. + + + + + The infinity norm. + + + + + The largest absolute value norm. + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + Supported data types are Double, Single, Complex, and Complex32. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiply elements of vectors or matrices. + + The array x. 
+ The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. 
On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the full QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by QR factor. This is only used for the managed provider and can be + null for the native provider. The native provider uses the Q portion stored in the R matrix. + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + On entry the B matrix; on exit the X matrix. + The number of columns of B. + On exit, the solution matrix. + Rows must be greater or equal to columns. + The type of QR factorization to perform. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
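The LU, Cholesky and QR routines documented above are what the high-level MathNet.Numerics matrix API dispatches to. A brief usage sketch (matrix values chosen arbitrarily; the 2x2 example is symmetric positive definite so all three factorizations apply):

using MathNet.Numerics.LinearAlgebra;

class SolveExamples
{
    static void Run()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0 },
            { 1.0, 3.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

        var xLu   = a.LU().Solve(b);       // general square systems (GETRF/GETRS-style)
        var xChol = a.Cholesky().Solve(b); // symmetric positive definite systems (POTRF/POTRS-style)
        var xQr   = a.QR().Solve(b);       // also covers least-squares for tall matrices

        System.Console.WriteLine(xLu);
    }
}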
+ + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsLAProvider" environment variable, + or fall back to the best provider. + + + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. 
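The eigenvalue routine described above (EigenDecomp) is surfaced through the Evd() factorization on the high-level matrix API. A short, illustrative usage sketch:

using MathNet.Numerics.LinearAlgebra;

class EvdExample
{
    static void Run()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2.0, 1.0 },
            { 1.0, 2.0 }   // symmetric, so the eigenvalues are real (1 and 3)
        });

        var evd = a.Evd();
        var eigenValues  = evd.EigenValues;   // eigenvalues in ascending order, as documented above
        var eigenVectors = evd.EigenVectors;  // one eigenvector per column

        System.Console.WriteLine(eigenValues);
    }
}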
+ There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. 
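The "Cache-Oblivious Matrix Multiplication" helper described above recursively halves the largest block dimension until the sub-blocks are small enough to fit in cache, then falls back to a plain triple loop. This is a simplified row-major sketch of that idea; the provider itself works with column-major storage, shifts and transpose options.

static class CacheObliviousMultiplySketch
{
    // Accumulates C[i,j] += A[i,k] * B[k,j] over the block
    // rows [r0,r1) x cols [c0,c1) x inner [k0,k1).
    public static void Multiply(double[,] a, double[,] b, double[,] c,
                                int r0, int r1, int c0, int c1, int k0, int k1)
    {
        int dr = r1 - r0, dc = c1 - c0, dk = k1 - k0;
        const int Base = 32; // base-case block edge, chosen arbitrarily for this sketch

        if (dr <= Base && dc <= Base && dk <= Base)
        {
            for (int i = r0; i < r1; i++)
                for (int k = k0; k < k1; k++)
                    for (int j = c0; j < c1; j++)
                        c[i, j] += a[i, k] * b[k, j];
            return;
        }

        // Split the largest extent in half and recurse on both halves.
        if (dr >= dc && dr >= dk)
        {
            int m = r0 + dr / 2;
            Multiply(a, b, c, r0, m, c0, c1, k0, k1);
            Multiply(a, b, c, m, r1, c0, c1, k0, k1);
        }
        else if (dc >= dk)
        {
            int m = c0 + dc / 2;
            Multiply(a, b, c, r0, r1, c0, m, k0, k1);
            Multiply(a, b, c, r0, r1, m, c1, k0, k1);
        }
        else
        {
            int m = k0 + dk / 2;
            Multiply(a, b, c, r0, r1, c0, c1, k0, m);
            Multiply(a, b, c, r0, r1, c0, c1, m, k1);
        }
    }
}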
+ + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + The B matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. 
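The Cholesky factorization described above (the POTRF equivalent) can be sketched as an in-place column algorithm. This illustrative version uses a plain 2-D array rather than the provider's packed column-major layout and column-block stepping.

using System;

static class CholeskySketch
{
    // Overwrites the lower triangle of the symmetric positive definite matrix a with L,
    // where a = L * L^T. Throws if the matrix is not positive definite.
    public static void Factor(double[,] a, int order)
    {
        for (int j = 0; j < order; j++)
        {
            double diag = a[j, j];
            for (int k = 0; k < j; k++) diag -= a[j, k] * a[j, k];
            if (diag <= 0.0) throw new ArgumentException("Matrix is not positive definite.");
            a[j, j] = Math.Sqrt(diag);

            for (int i = j + 1; i < order; i++)
            {
                double sum = a[i, j];
                for (int k = 0; k < j; k++) sum -= a[i, k] * a[j, k];
                a[i, j] = sum / a[j, j];
            }
        }
    }
}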
+ The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
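The vector-level provider routines repeated above (AXPY, SCAL, dot product and the point-wise operations) boil down to simple loops over the backing arrays. An illustrative managed sketch; the names mirror the documented routines but are not the provider's exact signatures.

static class VectorKernelsSketch
{
    // result = y + alpha * x (AXPY-style)
    public static void AddVectorToScaledVector(double[] y, double alpha, double[] x, double[] result)
    {
        for (int i = 0; i < y.Length; i++) result[i] = y[i] + alpha * x[i];
    }

    // result = alpha * x (SCAL-style)
    public static void ScaleArray(double alpha, double[] x, double[] result)
    {
        for (int i = 0; i < x.Length; i++) result[i] = alpha * x[i];
    }

    // DOT-style inner product
    public static double DotProduct(double[] x, double[] y)
    {
        double sum = 0.0;
        for (int i = 0; i < x.Length; i++) sum += x[i] * y[i];
        return sum;
    }

    // z = x * y element-wise (no direct BLAS equivalent, as the remarks above note)
    public static void PointWiseMultiplyArrays(double[] x, double[] y, double[] result)
    {
        for (int i = 0; i < x.Length; i++) result[i] = x[i] * y[i];
    }
}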
+ + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + The requested of the matrix. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. 
+ + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
+ The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
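The matrix norm types listed earlier (1-norm, Frobenius norm, infinity norm, largest absolute value) are simple reductions over the stored values. A sketch of two of them, assuming the column-major layout the dense provider routines work with:

using System;

static class MatrixNormSketch
{
    // Maximum absolute column sum of a column-major m x n matrix.
    public static double OneNorm(double[] values, int rows, int columns)
    {
        double max = 0.0;
        for (int j = 0; j < columns; j++)
        {
            double sum = 0.0;
            for (int i = 0; i < rows; i++) sum += Math.Abs(values[j * rows + i]);
            max = Math.Max(max, sum);
        }
        return max;
    }

    // Square root of the sum of squares of all entries.
    public static double FrobeniusNorm(double[] values)
    {
        double sum = 0.0;
        foreach (double v in values) sum += v * v;
        return Math.Sqrt(sum);
    }
}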
+ + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. 
Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. 
+ + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. 
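The DROTG-equivalent routine described above computes the Givens rotation that zeroes the y-coordinate of a point (da, db). A compact sketch of the core construction; the BLAS routine additionally applies sign conventions and returns the reconstruction parameter z, which this sketch omits.

using System;

static class GivensSketch
{
    // Returns (r, c, s) such that [ c  s; -s  c ] * [ a; b ] = [ r; 0 ].
    public static (double R, double C, double S) Rotate(double a, double b)
    {
        if (b == 0.0) return (a, 1.0, 0.0);
        double r = Math.Sqrt(a * a + b * b);
        return (r, a / r, b / r);
    }
}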
+ + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. 
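As a rough illustration of how the factor-and-solve routines documented above surface in managed code, here is a minimal sketch against the high-level MathNet.Numerics linear algebra types (Matrix<double>, Vector<double>, LU(), Cholesky()); the matrices and values are made up for the example, and the snippet assumes the MathNet.Numerics 3.x public API rather than the low-level provider interface itself.

using System;
using MathNet.Numerics.LinearAlgebra;

class LinearSolveSketch
{
    static void Main()
    {
        // A small, symmetric positive definite system A*x = b (values are arbitrary).
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0, 0.0 },
            { 1.0, 3.0, 1.0 },
            { 0.0, 1.0, 2.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

        // General square systems: LU factorization (GETRF/GETRS-style) behind Solve().
        var xLu = a.LU().Solve(b);

        // Symmetric positive definite systems: Cholesky factorization (POTRF/POTRS-style).
        var xChol = a.Cholesky().Solve(b);

        Console.WriteLine(xLu - xChol); // both solves should agree to rounding error
    }
}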
The same provider surface is documented a second time for another provider implementation: LUP factorization P*A = L*U (L stored below the diagonal with an implicit unit diagonal, U on and above it, pivot indices returned separately), LU-based inversion and solves, Cholesky factorization and solves, QR factorization in both R-returning and Q-returning variants with their column and worker helpers, QR-based least-squares solves (rows must be greater than or equal to columns), singular value decomposition, the Givens rotation, SVD-based solves, and the eigenvalue/eigenvector decomposition.
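The QR and SVD solve routines require rows greater than or equal to columns, which at the level of the public API corresponds to least-squares solving of overdetermined systems. A minimal sketch, again assuming the MathNet.Numerics 3.x Matrix<double> API and arbitrary sample data:

using System;
using MathNet.Numerics.LinearAlgebra;

class LeastSquaresSketch
{
    static void Main()
    {
        // Overdetermined system (4 equations, 2 unknowns): fit y = c0 + c1*t.
        var design = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 1.0, 0.0 },
            { 1.0, 1.0 },
            { 1.0, 2.0 },
            { 1.0, 3.0 }
        });
        var y = Vector<double>.Build.Dense(new[] { 0.9, 2.1, 2.9, 4.2 });

        // QR-based least squares (GEQRF/ORGQR-style factorization behind the scenes).
        var coeffsQr = design.QR().Solve(y);

        // SVD-based least squares; also exposes the singular values (GESVD-style).
        var svd = design.Svd();
        var coeffsSvd = svd.Solve(y);

        Console.WriteLine(coeffsQr - coeffsSvd); // both fits should agree to rounding error
        Console.WriteLine(svd.S);                // singular values of the design matrix
    }
}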
Random number generators: a multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760; a multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13; the Mersenne Twister 19937 generator (its internal constants plus a thread-safe default instance); and a 32-bit combined multiple recursive generator with 2 components of order 3, based on P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research 44(5), 1996, 816-822. Each generator documents constructors seeded from time and unique GUIDs or from an explicit seed (a zero seed is replaced by one), an optional thread-safety flag, NextDouble returning values greater than or equal to 0.0 and less than 1.0, NextBytes over the full byte range where applicable, and array- and sequence-returning variants that support being called in parallel from multiple threads (each returned sequence must be enumerated from a single thread).
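For the generators summarized above, usage is uniform across implementations. The sketch below assumes the Math.NET Numerics class names MersenneTwister and Mcg31m1 for the Mersenne Twister 19937 and 2^31-1 multiplicative congruential generators, and assumes NextDoubles as the name of the array-returning bulk sampler; the seed is arbitrary.

using System;
using MathNet.Numerics.Random;

class RandomGeneratorSketch
{
    static void Main()
    {
        // Mersenne Twister 19937 with an explicit seed and thread-safe sampling enabled.
        var mt = new MersenneTwister(42, true);

        // Multiplicative congruential generator (modulus 2^31-1, multiplier 1132489760).
        var mcg = new Mcg31m1(42);

        Console.WriteLine(mt.NextDouble());      // uniform double in [0.0, 1.0)
        Console.WriteLine(mcg.Next(1, 7));       // uniform integer in [1, 7)

        double[] samples = mt.NextDoubles(1000); // bulk sampling, safe to call in parallel
        Console.WriteLine(samples.Length);
    }
}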
A parallel additive lagged Fibonacci generator follows, based upon the implementation in the Boost Random Number Library, using modulus 2^32 and by default the lags 418 and 1279 (other popular pairs are listed on Wikipedia - Lagged Fibonacci generator); it exposes the short and long lag, keeps an internal buffer of unsigned random numbers with a cursor index, and refills the buffer with 32-bit unsigned integers. Extension methods for System.Random then add fills, arrays and infinite sequences of uniform doubles in [0.0, 1.0), uniform bytes, 32-bit integers (optionally restricted to a range), nonnegative 64-bit integers, full-range Int32 and Int64 values, decimals in [0.0, 1.0) and random booleans; these extensions are thread-safe if and only if called on a generator provided by Math.NET Numerics or derived from the RandomSource class. Seed providers offer a time-dependent seed (matching the default behavior of System.Random, with no real randomness), a seed based on time and unique GUIDs, and a seed based on an internal random number generator (crypto if available) plus time and GUIDs; none are suitable for cryptography. A RandomSource base class introduces a thread-safety layer between System.Random and the Math.NET Numerics generators (thread-safe instances are roughly two and a half times slower) and uses System.Random as its source when used directly.
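The seed providers and System.Random extension methods described above combine naturally. The sketch below assumes the MathNet.Numerics.Random.RandomSeed helper and the Xorshift generator class; the extension-method names NextBoolean, NextDecimal and NextInt64 are read off the descriptions above, so treat them as assumptions.

using System;
using MathNet.Numerics.Random;

class SeedAndExtensionSketch
{
    static void Main()
    {
        // Robust seed: internal RNG (crypto if available) plus time and unique GUIDs.
        int seed = RandomSeed.Robust();
        var rng = new Xorshift(seed);

        // Extension methods add further primitive types on top of any System.Random.
        bool coin = rng.NextBoolean();
        decimal d = rng.NextDecimal();     // in [0.0, 1.0)
        long big = rng.NextInt64();        // nonnegative 64-bit integer

        Console.WriteLine($"{seed} {coin} {d} {big}");
    }
}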
RandomSource members: NextDouble in [0.0, 1.0); Next with an exclusive upper bound (at least 1) or with inclusive lower and exclusive upper bounds; NextBytes; array fills, array returns and infinite sequences of doubles and of integers, plain or range-restricted; and protected sampling hooks for N-bit integers (N below 32 or 64, not verified) and for ranged integers whose preconditions must be ensured by the caller. A generator wrapping the .NET System.Random class provides the same surface plus a thread-safe default instance, with a warning that its bulk fills have a potentially very short random sequence length and can generate repeated partial sequences (they are parallelized for large lengths). Wichmann-Hill's 1982 combined multiplicative congruential generator follows (Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: An efficient and portable pseudo-random number generator", Applied Statistics 31, 188-190), then Wichmann-Hill's 2006 generator (Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers", Computational Statistics & Data Analysis 51:3, 1614-1622), and a multiply-with-carry Xorshift generator as specified in Marsaglia, George (2003), "Xorshift RNGs", http://www.jstatsoft.org/v08/i14/paper, with recurrence X_n = a*X_(n-3) + c mod 2^32 and default parameters a = 916905990, c = 13579, X1 = 77465321, X2 = 362436069, all overridable through its constructors. Root finding starts with the bisection algorithm: FindRoot takes the function, guesses for the low and high ends of the bracketing range (expanded if needed, by a default factor of 1.6 for up to 100 expansion iterations), a desired accuracy (default 1e-8) and a maximum number of iterations (default 100), and returns the root with the specified accuracy.
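Since the bisection entry above spells out its defaults (accuracy 1e-8, 100 iterations, expansion factor 1.6), a short sketch of the bracketed call is easy to give; it assumes the MathNet.Numerics.RootFinding.Bisection class, and the test polynomial is arbitrary.

using System;
using MathNet.Numerics.RootFinding;

class BisectionSketch
{
    static void Main()
    {
        // f(x) = x^3 - x - 2 has a single real root near x ≈ 1.521.
        Func<double, double> f = x => x * x * x - x - 2.0;

        // Throwing form: refine until 1e-8 accuracy or 100 iterations.
        double root = Bisection.FindRoot(f, 1.0, 2.0, 1e-8, 100);

        // Non-throwing form: reports success instead of raising on failure.
        if (Bisection.TryFindRoot(f, 1.0, 2.0, 1e-8, 100, out double root2))
        {
            Console.WriteLine($"{root} {root2}");
        }
    }
}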
Bisection also provides FindRoot over a fixed bracket and a TryFindRoot form that reports success instead of throwing (the root output is undefined on failure). The Brent algorithm (Brent, Van Wijngaarden, Dekker et al.; implementation inspired by Press, Teukolsky, Vetterling and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press) exposes the same FindRoot/TryFindRoot surface plus a sign-transfer helper, a*sign(b), used to prevent rounding errors. Broyden's method (same Numerical Recipes lineage) finds a solution of f(x)=0 from an initial guess, with a helper that calculates an approximation of the Jacobian. A cubic solver finds the real roots of a0 + a1*x + a2*x^2 + x^3 = 0 and all three complex roots of d + c*x + b*x^2 + a*x^3 = 0, implementing the cubic formula from http://mathworld.wolfram.com/CubicFormula.html with coefficients ordered ascending by exponent (consistent with polynomials), using transformed Q and R variables and a work-around for taking the cube root of a negative double. A pure Newton-Raphson solver, with no recovery measures, takes the function and its first derivative, an optional initial guess and bracketing bounds, and aborts immediately if the iterate leaves the interval; a robust Newton-Raphson variant instead falls back to bisection when overshooting or converging too slowly, and to subdivision when bracketing is lacking.
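For the Brent and derivative-based solvers just described the calling pattern is similar; the sketch assumes the Brent and RobustNewtonRaphson classes in MathNet.Numerics.RootFinding and reuses the same arbitrary test function.

using System;
using MathNet.Numerics.RootFinding;

class BrentAndNewtonSketch
{
    static void Main()
    {
        Func<double, double> f = x => x * x * x - x - 2.0;
        Func<double, double> df = x => 3.0 * x * x - 1.0; // analytic first derivative

        // Brent's method on a bracketing interval.
        double brentRoot = Brent.FindRoot(f, 1.0, 2.0, 1e-10, 100);

        // Newton-Raphson with bisection/subdivision fallbacks (20 subdivisions).
        double newtonRoot = RobustNewtonRaphson.FindRoot(f, df, 1.0, 2.0, 1e-10, 100, 20);

        Console.WriteLine($"{brentRoot} {newtonRoot}");
    }
}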
The robust variant's FindRoot and TryFindRoot overloads default to 100 iterations and split the interval into 20 parts when scanning for zero crossings. A pure secant method takes two initial guesses and optional bounds, again aborting if the iterate leaves the interval, and a bracketing helper expands a range (growth factor usually 1.6, usually up to 50 iterations) until it contains at least one root, stopping when two values with opposite signs are found. Sorting algorithms for single, tuple and triple lists follow: in-place quick sort of a key list under a supplied comparison, of a key list plus one or two item lists permuted the same way, of sub-ranges given a zero-based starting index and length, and of a primary/secondary key pair where the secondary comparison orders duplicates of the primary keys; the recursive quick sort implementations and an in-place element swap helper are documented alongside.
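The keyed sorting routines permute one or two item lists in lock-step with the keys. A minimal sketch, assuming the static Sorting.Sort overloads in the MathNet.Numerics namespace match the descriptions above; the data is arbitrary.

using System;
using System.Collections.Generic;
using MathNet.Numerics;

class KeyedSortSketch
{
    static void Main()
    {
        // Keys and a parallel payload list that must stay aligned with the keys.
        var keys = new List<double> { 3.2, 1.5, 2.7, 0.4 };
        var labels = new List<string> { "c", "b", "a", "d" };

        // In-place quick sort of the keys; labels are permuted the same way.
        Sorting.Sort(keys, labels);

        for (int i = 0; i < keys.Count; i++)
        {
            Console.WriteLine($"{keys[i]}: {labels[i]}");
        }
    }
}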
Special functions are implemented as partial classes covering the error function, the harmonic function, the logistic function and the modified Bessel functions. Documented first are the logarithm of the Euler Beta function and the Euler Beta function themselves (both parameters must be positive real numbers), the lower incomplete (unregularized) beta function B(a,b,x) = int(t^(a-1)*(1-t)^(b-1), t=0..x) and the regularized lower incomplete beta function I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1), t=0..x), both for real a > 0, b > 0 and 0 <= x <= 1. These are followed by the polynomial coefficient tables (numerator and denominator pairs) used by the ErfImp rational approximations: Erf(x) on [1e-10, 0.5] and Erfc(x) on the intervals [0.5, 0.75], [0.75, 1.25], [1.25, 2.25], [2.25, 3.5], [3.5, 5.25], [5.25, 8], [8, 11.5], [11.5, 17], [17, 24], [24, 38], [38, 60], [60, 85] and [85, 110].
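The coefficient tables above drive the rational approximations behind the public error-function members; a short usage sketch with SpecialFunctions (the argument is arbitrary):

using System;
using MathNet.Numerics;

class ErrorFunctionSketch
{
    static void Main()
    {
        double x = 0.75;

        double erf = SpecialFunctions.Erf(x);   // error function
        double erfc = SpecialFunctions.Erfc(x); // complementary error function

        // Erf and Erfc are complements, and ErfInv undoes Erf (to rounding error).
        Console.WriteLine(erf + erfc);                       // ~1.0
        Console.WriteLine(SpecialFunctions.ErfInv(erf) - x); // ~0.0
    }
}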
+ + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. + returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! 
using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. 
+ The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
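As a quick orientation for the error-function entries above, the sketch below shows how Erf, Erfc and the inverse are typically called from the MathNet.Numerics SpecialFunctions class that this changeset references. The method names (SpecialFunctions.Erf, Erfc, ErfInv) reflect the 3.16 API as best I can tell; treat them as an assumption rather than something this diff itself establishes.

    using System;
    using MathNet.Numerics;

    static class ErfSketch
    {
        static void Main()
        {
            double x = 1.25;

            // Erf and Erfc are complementary: Erf(x) + Erfc(x) should be 1 up to rounding.
            double erf  = SpecialFunctions.Erf(x);
            double erfc = SpecialFunctions.Erfc(x);
            Console.WriteLine($"Erf({x}) = {erf}, Erfc({x}) = {erfc}, sum = {erf + erfc}");

            // The inverse undoes Erf; the remarks above only guarantee about 9 significant
            // figures for the complementary inverse, so expect a small round-trip error.
            Console.WriteLine($"ErfInv(Erf({x})) = {SpecialFunctions.ErfInv(erf)}");
        }
    }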
+ + Returns the modified Bessel function of the first kind, + order 1 of the argument. +

+ The function is defined as i1(x) = -i j1( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
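The Gamma, Beta, factorial and binomial entries earlier in this file are easiest to read alongside a call site. A minimal sketch, again assuming the SpecialFunctions method names used by MathNet.Numerics 3.x (FactorialLn, GammaLowerRegularized, BetaRegularized, Binomial):

    using System;
    using MathNet.Numerics;

    static class GammaBetaSketch
    {
        static void Main()
        {
            // 171! overflows a double, so follow the advice above and work in log space.
            Console.WriteLine($"ln(171!) = {SpecialFunctions.FactorialLn(171)}");

            // Regularized lower incomplete gamma P(a,x): the CDF of a Gamma(a, rate 1) variate at x.
            double p = SpecialFunctions.GammaLowerRegularized(3.0, 2.5);

            // Regularized lower incomplete beta I_x(a,b): the CDF of a Beta(a,b) variate at x.
            double ix = SpecialFunctions.BetaRegularized(2.0, 5.0, 0.3);

            // Binomial coefficient n choose k; 10 choose 3 is 120.
            double choose = SpecialFunctions.Binomial(10, 3);

            Console.WriteLine($"P(3, 2.5) = {p}, I_0.3(2, 5) = {ix}, C(10, 3) = {choose}");
        }
    }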
+ + Returns the modified Bessel function of the second kind + of order 0 of the argument. +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 0 of the argument. + + The value to compute the bessel function of. + + + + Returns the modified Bessel function of the second kind + of order 1 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 1 of the argument. +

+ k1e(x) = exp(x) * k1(x). +

+ The value to compute the Bessel function of. + +
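For the modified Bessel entries above, here is a short sketch contrasting the scaled and unscaled variants. The names BesselI0, BesselK0 and BesselK0e are an assumption about how the 3.16 SpecialFunctions class exposes them; the scaling identity in the comment mirrors the one documented above for k1e.

    using System;
    using MathNet.Numerics;

    static class BesselSketch
    {
        static void Main()
        {
            double x = 2.0;

            // Modified Bessel functions: I0 grows with x, K0 decays and diverges at 0.
            double i0 = SpecialFunctions.BesselI0(x);
            double k0 = SpecialFunctions.BesselK0(x);

            // Exponentially scaled variant, useful when exp(-x) factors would underflow:
            // by the same convention as k1e above, k0e(x) = exp(x) * k0(x).
            double k0e = SpecialFunctions.BesselK0e(x);

            Console.WriteLine($"I0({x}) = {i0}, K0({x}) = {k0}");
            Console.WriteLine($"K0e({x}) = {k0e}, exp(x)*K0({x}) = {Math.Exp(x) * k0}");
        }
    }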
+ + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = sum'( coef[i] * T_i(x/2) ), for i = 0 .. N-1
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must + have been transformed to x -> 2(2x - b - a)/(b-a) before + entering the routine. This maps x from (a, b) to (-1, 1), + over which the Chebyshev polynomials are defined. +

+ If the coefficients are for the inverted interval, in + which (a, b) is mapped to (1/b, 1/a), the transformation + required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + this becomes x -> 4a/x - 1. +

+ SPEED: +

+ Taking advantage of the recurrence properties of the + Chebyshev polynomials, the routine requires one more + addition per loop than evaluating a nested polynomial of + the same degree. +

+ The coefficients of the polynomial. + Argument to the polynomial. + + Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs +

+ Marked as Deprecated in + http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html + + + +

+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification. + + The number of terms in the sequence. + The coefficients of the Chebyshev series, length n+1. + The value at which the series is to be evaluated. + + ORIGINAL AUTHOR: + Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics, University of Paisley; High St., PAISLEY, SCOTLAND + REFERENCES: + "An error analysis of the modified Clenshaw method for evaluating Chebyshev and Fourier series", + J. Oliver, J.I.M.A., vol. 20, 1977, pp. 379-391 + +
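The Chebyshev evaluator these remarks describe is an internal helper, so rather than guessing at a library entry point, the sketch below is a self-contained illustration of the recurrence the text explains: coefficients ordered with the zero-order term last, the primed sum taking that last term at half weight, and one extra addition per loop compared with a nested (Horner) polynomial.

    using System;

    static class ChebyshevSketch
    {
        // Evaluates y = sum'( coef[i] * T_i(x/2) ) with the conventions described above:
        // coef[0] is the highest-order coefficient, coef[N-1] the zero-order one,
        // and the zero-order term enters with weight 1/2 (the primed sum).
        static double EvaluateChebyshev(double x, double[] coef)
        {
            double b0 = coef[0], b1 = 0.0, b2 = 0.0;
            for (int i = 1; i < coef.Length; i++)
            {
                b2 = b1;
                b1 = b0;
                // 2 * (x/2) = x, so the usual 2t factor of the Clenshaw recurrence is just x here.
                b0 = x * b1 - b2 + coef[i];
            }
            return 0.5 * (b0 - b2);
        }

        static void Main()
        {
            // coef = {4, 0, 0} encodes 4*T_2(x/2) = 2x^2 - 4, so the value at x = 1 is -2.
            Console.WriteLine(EvaluateChebyshev(1.0, new[] { 4.0, 0.0, 0.0 }));
        }
    }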
+ + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. 
+ Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. 
+ On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. 
+ + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. 
+ + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. + + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. 
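Stepping back to the descriptive-statistics, correlation and histogram entries above, the sketch below shows the usual call patterns. Statistics extension methods, Correlation.Pearson, DescriptiveStatistics and Histogram are standard MathNet.Numerics.Statistics types in 3.x, but the exact member names here are stated from memory rather than taken from this diff.

    using System;
    using System.Linq;
    using MathNet.Numerics.Statistics;

    static class StatisticsSketch
    {
        static void Main()
        {
            double[] data = { 3.1, 1.4, 4.1, 5.9, 2.6, 5.3, 5.8, 9.7, 9.3, 2.3 };

            // Extension-method statistics over IEnumerable<double>.
            Console.WriteLine($"mean = {data.Mean()}, sd = {data.StandardDeviation()}");
            Console.WriteLine($"median = {data.Median()}, 90th percentile = {data.Quantile(0.9)}");

            // Pearson correlation of two series (a perfect linear relation here, so ~1).
            double[] scaled = data.Select(v => 2.0 * v + 0.5).ToArray();
            Console.WriteLine($"Pearson = {Correlation.Pearson(data, scaled)}");

            // One-pass summary object with skewness/kurtosis, as documented above.
            var summary = new DescriptiveStatistics(data);
            Console.WriteLine($"skewness = {summary.Skewness}, kurtosis = {summary.Kurtosis}");

            // Equal-width histogram: 5 buckets spanning the data range, each (lower, upper].
            var histogram = new Histogram(data, 5);
            for (int i = 0; i < histogram.BucketCount; i++)
            {
                var bucket = histogram[i];
                Console.WriteLine($"({bucket.LowerBound}, {bucket.UpperBound}] : {bucket.Count}");
            }
        }
    }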
+ + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. + When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. 
+ + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. + + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. 
+ The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. + + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. 
+ The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. 
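Editor's note: the rejection sampler described above requires the (possibly unnormalised) proposal density to upper-bound the target, P(x) < Q(x), and signals an error when that assumption is detected to fail. A minimal standalone sketch of that contract, not the library's code:

```csharp
using System;

static class RejectionSamplingSketch
{
    // Draw from density p via proposal density q and its sampler, assuming p(x) <= q(x) everywhere.
    public static double Sample(Random rng,
        Func<double, double> p, Func<double, double> q, Func<Random, double> sampleQ)
    {
        while (true)
        {
            double x = sampleQ(rng);
            double px = p(x), qx = q(x);
            if (px > qx)
                throw new ArgumentException("Proposal density does not upper-bound the target density.");
            if (rng.NextDouble() * qx < px)   // accept with probability p(x) / q(x)
                return x;
        }
    }
}
```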
+ The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. 
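Editor's note: the univariate hybrid Monte Carlo constructors above default to "a simple three point estimation" for differentiation, i.e. a central difference. A short sketch of that estimate; the step `h` is an illustrative choice, not a documented parameter.

```csharp
using System;

static class ThreePointDerivativeSketch
{
    // Three-point (central difference) estimate of f'(x): (f(x + h) - f(x - h)) / (2 h).
    public static double Differentiate(Func<double, double> f, double x, double h = 1e-5)
    {
        return (f(x + h) - f(x - h)) / (2.0 * h);
    }
}
```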
+ The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. 
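Editor's note: the tie-handling options listed above (mean, minimum, maximum, increasing permutation) differ only in how equal values are ranked. The sketch below computes the default mean ("average") ranks for already-sorted data and notes, in the trailing comment, what the other options would give on the same hypothetical values; it is an illustration, not the library's ranking code.

```csharp
static class TiedRanksSketch
{
    // Average ("mean") ranks for data sorted ascendingly: ties share the mean of their positions.
    public static double[] AverageRanks(double[] sortedData)
    {
        var ranks = new double[sortedData.Length];
        int i = 0;
        while (i < sortedData.Length)
        {
            int j = i;
            while (j + 1 < sortedData.Length && sortedData[j + 1] == sortedData[i]) j++;
            double meanRank = (i + 1 + j + 1) / 2.0;           // mean of the 1-based tied positions
            for (int k = i; k <= j; k++) ranks[k] = meanRank;
            i = j + 1;
        }
        return ranks;
    }
}

// AverageRanks(new[] { 10.0, 20.0, 20.0, 30.0 }) -> { 1, 2.5, 2.5, 4 }  (ties replaced by their mean)
// Minimum tie handling would give { 1, 2, 2, 4 }, maximum { 1, 3, 3, 4 }, permutation { 1, 2, 3, 4 }.
```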
+ + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. 
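Editor's note: a usage sketch for the running-statistics accumulator documented above, assuming it is MathNet.Numerics' `RunningStatistics` type in `MathNet.Numerics.Statistics` with `Push`, `PushRange` and a static `Combine`, as the summaries suggest; treat the type name, the collection constructor and `Combine` as assumptions if the bundled package version differs.

```csharp
using System;
using MathNet.Numerics.Statistics;   // assumed namespace, see note above

class RunningStatisticsExample
{
    static void Main()
    {
        // Update in place by adding observed samples, as documented above.
        var left = new RunningStatistics();
        left.PushRange(new[] { 1.0, 2.0, 3.0 });
        left.Push(4.0);

        var right = new RunningStatistics(new[] { 5.0, 6.0 });   // assumed collection constructor

        // Combine two accumulators over the union of their samples.
        var combined = RunningStatistics.Combine(left, right);

        Console.WriteLine("n={0}, mean={1}, sample stddev={2}",
            combined.Count, combined.Mean, combined.StandardDeviation);
    }
}
```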
+ + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
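Editor's note: every summary above stresses that these functions expect the array to already be sorted ascending. A minimal usage sketch, assuming they correspond to MathNet.Numerics' `SortedArrayStatistics` static class (an assumption made because the member summaries match); sort first, then query.

```csharp
using System;
using MathNet.Numerics.Statistics;   // assumed namespace for SortedArrayStatistics

class SortedArrayStatisticsExample
{
    static void Main()
    {
        double[] data = { 16.0, 1.0, 8.0, 2.0, 4.0 };
        Array.Sort(data);   // must be sorted ascendingly before calling any of the functions above

        Console.WriteLine(SortedArrayStatistics.Median(data));             // R8 median estimate
        Console.WriteLine(SortedArrayStatistics.Quantile(data, 0.75));     // tau-th quantile, R8
        Console.WriteLine(SortedArrayStatistics.InterquartileRange(data)); // Q3 - Q1, R8
    }
}
```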
+ + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. 
+ Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. 
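Editor's note: the distinction repeated throughout the summaries above is only the normalizer: the sample estimators divide by N−1 (Bessel's correction), the population evaluators by N. For {2, 4, 6} the mean is 4 and the squared deviations sum to 8, so the sample variance is 8/2 = 4 while the population variance is 8/3 ≈ 2.67. A short sketch using the extension methods, assuming they are MathNet.Numerics' `Statistics` extensions as documented:

```csharp
using System;
using MathNet.Numerics.Statistics;   // assumed namespace for the Statistics extension methods

class VarianceExample
{
    static void Main()
    {
        double[] data = { 2.0, 4.0, 6.0 };

        Console.WriteLine(data.Variance());             // N-1 normalizer: 4
        Console.WriteLine(data.PopulationVariance());   // N   normalizer: 2.666...
        Console.WriteLine(data.StandardDeviation());    // sqrt(4) = 2
    }
}
```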
+ Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + The full population data. + + + + Evaluates the skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + The full population data. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the unbiased population skewness and kurtosis from the provided samples in a single pass. + Uses a normalizer (Bessel's correction; type 2). + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness and kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + + The full population data. 
+ + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + The full population data. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. 
+ The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. 
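Editor's note: the R-8 rule quoted above ("when tau < (2/3)/(N + 1/3) use x1; when tau >= (N − 1/3)/(N + 1/3) use xN") corresponds to interpolating at the 1-based position h = (N + 1/3)·tau + 1/3. The sketch below restates that standard R-8 formula on sorted data; it is an illustration of the definition, not the library's source.

```csharp
using System;

static class QuantileR8Sketch
{
    // R-8 quantile of data sorted ascendingly, tau in [0, 1].
    public static double Quantile(double[] sorted, double tau)
    {
        int n = sorted.Length;
        if (tau < (2.0 / 3.0) / (n + 1.0 / 3.0)) return sorted[0];          // use x1
        if (tau >= (n - 1.0 / 3.0) / (n + 1.0 / 3.0)) return sorted[n - 1]; // use xN

        double h = (n + 1.0 / 3.0) * tau + 1.0 / 3.0;                       // 1-based position
        int k = (int)Math.Floor(h);
        return sorted[k - 1] + (h - k) * (sorted[k] - sorted[k - 1]);       // linear interpolation
    }
}
```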
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. 
+ Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + Null-entries are ignored. + + The data sample sequence. + + + + Evaluates the sample mean over a moving window, for each samples. + Returns NaN if no data is empty or if any entry is NaN. + + The sample stream to calculate the mean of. + The number of last samples to consider. + + + + Statistics operating on an IEnumerable in a single pass, without keeping the full data in memory. + Can be used in a streaming way, e.g. on large datasets not fitting into memory. + + + + + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. 
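Editor's note: the entropy described above ("entropy of a stream of double values in bits", NaN-propagating) is Shannon entropy of the empirical distribution of the observed values. A self-contained sketch of that calculation, written as an illustration rather than the library's implementation:

```csharp
using System;
using System.Collections.Generic;

static class EntropySketch
{
    // Shannon entropy in bits of the empirical distribution of the stream; NaN propagates as NaN.
    public static double Entropy(IEnumerable<double> stream)
    {
        var counts = new Dictionary<double, long>();
        long total = 0;
        foreach (double value in stream)
        {
            if (double.IsNaN(value)) return double.NaN;
            long c;
            counts[value] = counts.TryGetValue(value, out c) ? c + 1 : 1;
            total++;
        }

        double entropy = 0.0;
        foreach (long count in counts.Values)
        {
            double p = (double)count / total;
            entropy -= p * Math.Log(p, 2.0);   // log base 2 gives bits
        }
        return entropy;
    }
}

// Entropy(new[] { 1.0, 1.0, 2.0, 2.0 }) == 1.0 bit; a constant stream has entropy 0.
```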
+ + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. 
+ + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Calculates the entropy of a stream of double values. + Returns NaN if any of the values in the stream are NaN. + + The input stream to evaluate. + + + + + Used to simplify parallel code, particularly between the .NET 4.0 and Silverlight Code. + + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The body to be invoked for each iteration range. + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The partition size for splitting work into smaller pieces. + The body to be invoked for each iteration range. + + + + Executes each of the provided actions inside a discrete, asynchronous task. + + An array of actions to execute. + The actions array contains a null element. + At least one invocation of the actions threw an exception. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. 
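Editor's note: the parallel helper described above partitions an index range and invokes a body once per iteration range. The same pattern can be expressed with the BCL's `Parallel.ForEach` over a range partitioner, shown here only to illustrate the idea, not as the library's internal code:

```csharp
using System;
using System.Collections.Concurrent;
using System.Threading.Tasks;

class ParallelRangeExample
{
    static void Main()
    {
        double[] data = new double[1000];

        // The body runs once per iteration range (fromInclusive, toExclusive), possibly in parallel.
        Parallel.ForEach(Partitioner.Create(0, data.Length), range =>
        {
            for (int i = range.Item1; i < range.Item2; i++)
            {
                data[i] = Math.Sqrt(i);
            }
        });

        Console.WriteLine(data[999]);
    }
}
```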
+ The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Double-precision trigonometry toolkit. + + + + + Constant to convert a degree to grad. + + + + + Converts a degree (360-periodic) angle to a grad (400-periodic) angle. + + The degree to convert. + The converted grad angle. + + + + Converts a degree (360-periodic) angle to a radian (2*Pi-periodic) angle. + + The degree to convert. + The converted radian angle. + + + + Converts a grad (400-periodic) angle to a degree (360-periodic) angle. + + The grad to convert. + The converted degree. + + + + Converts a grad (400-periodic) angle to a radian (2*Pi-periodic) angle. + + The grad to convert. + The converted radian. + + + + Converts a radian (2*Pi-periodic) angle to a degree (360-periodic) angle. + + The radian to convert. + The converted degree. + + + + Converts a radian (2*Pi-periodic) angle to a grad (400-periodic) angle. + + The radian to convert. + The converted grad. + + + + Normalized Sinc function. sinc(x) = sin(pi*x)/(pi*x). + + + + + Trigonometric Sine of an angle in radian, or opposite / hypotenuse. + + The angle in radian. + The sine of the radian angle. + + + + Trigonometric Sine of a Complex number. + + The complex value. + The sine of the complex number. + + + + Trigonometric Cosine of an angle in radian, or adjacent / hypotenuse. + + The angle in radian. + The cosine of an angle in radian. + + + + Trigonometric Cosine of a Complex number. + + The complex value. + The cosine of a complex number. + + + + Trigonometric Tangent of an angle in radian, or opposite / adjacent. + + The angle in radian. + The tangent of the radian angle. + + + + Trigonometric Tangent of a Complex number. + + The complex value. + The tangent of the complex number. + + + + Trigonometric Cotangent of an angle in radian, or adjacent / opposite. Reciprocal of the tangent. + + The angle in radian. + The cotangent of an angle in radian. + + + + Trigonometric Cotangent of a Complex number. + + The complex value. + The cotangent of the complex number. + + + + Trigonometric Secant of an angle in radian, or hypotenuse / adjacent. Reciprocal of the cosine. + + The angle in radian. + The secant of the radian angle. + + + + Trigonometric Secant of a Complex number. + + The complex value. + The secant of the complex number. + + + + Trigonometric Cosecant of an angle in radian, or hypotenuse / opposite. Reciprocal of the sine. + + The angle in radian. + Cosecant of an angle in radian. + + + + Trigonometric Cosecant of a Complex number. + + The complex value. + The cosecant of a complex number. + + + + Trigonometric principal Arc Sine in radian + + The opposite for a unit hypotenuse (i.e. opposite / hyptenuse). + The angle in radian. + + + + Trigonometric principal Arc Sine of this Complex number. + + The complex value. + The arc sine of a complex number. + + + + Trigonometric principal Arc Cosine in radian + + The adjacent for a unit hypotenuse (i.e. adjacent / hypotenuse). 
+ The angle in radian. + + + + Trigonometric principal Arc Cosine of this Complex number. + + The complex value. + The arc cosine of a complex number. + + + + Trigonometric principal Arc Tangent in radian + + The opposite for a unit adjacent (i.e. opposite / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Tangent of this Complex number. + + The complex value. + The arc tangent of a complex number. + + + + Trigonometric principal Arc Cotangent in radian + + The adjacent for a unit opposite (i.e. adjacent / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cotangent of this Complex number. + + The complex value. + The arc cotangent of a complex number. + + + + Trigonometric principal Arc Secant in radian + + The hypotenuse for a unit adjacent (i.e. hypotenuse / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Secant of this Complex number. + + The complex value. + The arc secant of a complex number. + + + + Trigonometric principal Arc Cosecant in radian + + The hypotenuse for a unit opposite (i.e. hypotenuse / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cosecant of this Complex number. + + The complex value. + The arc cosecant of a complex number. + + + + Hyperbolic Sine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic sine of the angle. + + + + Hyperbolic Sine of a Complex number. + + The complex value. + The hyperbolic sine of a complex number. + + + + Hyperbolic Cosine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic Cosine of the angle. + + + + Hyperbolic Cosine of a Complex number. + + The complex value. + The hyperbolic cosine of a complex number. + + + + Hyperbolic Tangent in radian + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic tangent of the angle. + + + + Hyperbolic Tangent of a Complex number. + + The complex value. + The hyperbolic tangent of a complex number. + + + + Hyperbolic Cotangent + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cotangent of the angle. + + + + Hyperbolic Cotangent of a Complex number. + + The complex value. + The hyperbolic cotangent of a complex number. + + + + Hyperbolic Secant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic secant of the angle. + + + + Hyperbolic Secant of a Complex number. + + The complex value. + The hyperbolic secant of a complex number. + + + + Hyperbolic Cosecant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cosecant of the angle. + + + + Hyperbolic Cosecant of a Complex number. + + The complex value. + The hyperbolic cosecant of a complex number. + + + + Hyperbolic Area Sine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Sine of this Complex number. + + The complex value. + The hyperbolic arc sine of a complex number. + + + + Hyperbolic Area Cosine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosine of this Complex number. + + The complex value. + The hyperbolic arc cosine of a complex number. + + + + Hyperbolic Area Tangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Tangent of this Complex number. + + The complex value. + The hyperbolic arc tangent of a complex number. + + + + Hyperbolic Area Cotangent + + The real value. + The hyperbolic angle, i.e. 
the area of its hyperbolic sector. + + + + Hyperbolic Area Cotangent of this Complex number. + + The complex value. + The hyperbolic arc cotangent of a complex number. + + + + Hyperbolic Area Secant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Secant of this Complex number. + + The complex value. + The hyperbolic arc secant of a complex number. + + + + Hyperbolic Area Cosecant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosecant of this Complex number. + + The complex value. + The hyperbolic arc cosecant of a complex number. + + + + Hamming window. Named after Richard Hamming. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hamming window. Named after Richard Hamming. + Periodic version, useful e.g. for FFT purposes. + + + + + Hann window. Named after Julius von Hann. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hann window. Named after Julius von Hann. + Periodic version, useful e.g. for FFT purposes. + + + + + Cosine window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Cosine window. + Periodic version, useful e.g. for FFT purposes. + + + + + Lanczos window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Lanczos window. + Periodic version, useful e.g. for FFT purposes. + + + + + Gauss window. + + + + + Blackman window. + + + + + Blackman-Harris window. + + + + + Blackman-Nuttall window. + + + + + Bartlett window. + + + + + Bartlett-Hann window. + + + + + Nuttall window. + + + + + Flat top window. + + + + + Uniform rectangular (dirichlet) window. + + + + + Triangular window. + + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized string similar to The accuracy couldn't be reached with the specified number of iterations.. + + + + + Looks up a localized string similar to The array arguments must have the same length.. + + + + + Looks up a localized string similar to The given array has the wrong length. Should be {0}.. + + + + + Looks up a localized string similar to The argument must be between 0 and 1.. + + + + + Looks up a localized string similar to Value cannot be in the range -1 < x < 1.. + + + + + Looks up a localized string similar to Value must be even.. + + + + + Looks up a localized string similar to The histogram does not contain the value.. + + + + + Looks up a localized string similar to Value is expected to be between {0} and {1} (including {0} and {1}).. + + + + + Looks up a localized string similar to At least one item of {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be greater than or equal to one.. + + + + + Looks up a localized string similar to Matrix dimensions must agree.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: {0}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}, op3 is {2}.. + + + + + Looks up a localized string similar to The requested matrix does not exist.. 
+ + + + + Looks up a localized string similar to The matrix indices must not be out of range of the given matrix.. + + + + + Looks up a localized string similar to Matrix must not be rank deficient.. + + + + + Looks up a localized string similar to Matrix must not be singular.. + + + + + Looks up a localized string similar to Matrix must be positive definite.. + + + + + Looks up a localized string similar to Matrix column dimensions must agree.. + + + + + Looks up a localized string similar to Matrix row dimensions must agree.. + + + + + Looks up a localized string similar to Matrix must have exactly one column.. + + + + + Looks up a localized string similar to Matrix must have exactly one column and row, thus have only one cell.. + + + + + Looks up a localized string similar to Matrix must have exactly one row.. + + + + + Looks up a localized string similar to Matrix must be square.. + + + + + Looks up a localized string similar to Matrix must be symmetric.. + + + + + Looks up a localized string similar to Matrix must be symmetric positive definite.. + + + + + Looks up a localized string similar to In the specified range, the exclusive maximum must be greater than the inclusive minimum.. + + + + + Looks up a localized string similar to In the specified range, the minimum is greater than maximum.. + + + + + Looks up a localized string similar to Value must be positive.. + + + + + Looks up a localized string similar to Value must neither be infinite nor NaN.. + + + + + Looks up a localized string similar to Value must not be negative (zero is ok).. + + + + + Looks up a localized string similar to {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be odd.. + + + + + Looks up a localized string similar to {0} must be greater than {1}.. + + + + + Looks up a localized string similar to {0} must be greater than or equal to {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than or equal to {1}.. + + + + + Looks up a localized string similar to The chosen parameter set is invalid (probably some value is out of range).. + + + + + Looks up a localized string similar to The given expression does not represent a complex number.. + + + + + Looks up a localized string similar to Value must be positive (and not zero).. + + + + + Looks up a localized string similar to Size must be a Power of Two.. + + + + + Looks up a localized string similar to Size must be a Power of Two in every dimension.. + + + + + Looks up a localized string similar to The range between {0} and {1} must be less than or equal to {2}.. + + + + + Looks up a localized string similar to Arguments must be different objects.. + + + + + Looks up a localized string similar to Array must have exactly one dimension (and not be null).. + + + + + Looks up a localized string similar to Value is too large.. + + + + + Looks up a localized string similar to Value is too large for the current iteration limit.. + + + + + Looks up a localized string similar to Type mismatch.. + + + + + Looks up a localized string similar to The upper bound must be strictly larger than the lower bound.. + + + + + Looks up a localized string similar to The upper bound must be at least as large as the lower bound.. + + + + + Looks up a localized string similar to Array length must be a multiple of {0}.. + + + + + Looks up a localized string similar to All vectors must have the same dimensionality.. 
+ + + + + Looks up a localized string similar to The vector must have 3 dimensions.. + + + + + Looks up a localized string similar to The given array is too small. It must be at least {0} long.. + + + + + Looks up a localized string similar to Big endian files are not supported.. + + + + + Looks up a localized string similar to The supplied collection is empty.. + + + + + Looks up a localized string similar to Complex matrices are not supported.. + + + + + Looks up a localized string similar to An algorithm failed to converge.. + + + + + Looks up a localized string similar to The sample size must be larger than the given degrees of freedom.. + + + + + Looks up a localized string similar to This feature is not implemented yet (but is planned).. + + + + + Looks up a localized string similar to The given file doesn't exist.. + + + + + Looks up a localized string similar to Sample points should be sorted in strictly ascending order. + + + + + Looks up a localized string similar to All sample points should be unique.. + + + + + Looks up a localized string similar to Invalid parameterization for the distribution.. + + + + + Looks up a localized string similar to Invalid Left Boundary Condition.. + + + + + Looks up a localized string similar to The operation could not be performed because the accumulator is empty.. + + + + + Looks up a localized string similar to The operation could not be performed because the histogram is empty.. + + + + + Looks up a localized string similar to Not enough points in the distribution.. + + + + + Looks up a localized string similar to No Samples Provided. Preparation Required.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method, parameter number : {0}. + + + + + Looks up a localized string similar to Invalid Right Boundary Condition.. + + + + + Looks up a localized string similar to Lag must be positive. + + + + + Looks up a localized string similar to Lag must be smaller than the sample size. + + + + + Looks up a localized string similar to ddd MMM dd HH:mm:ss yyyy. + + + + + Looks up a localized string similar to Matrices can not be empty and must have at least one row and column.. + + + + + Looks up a localized string similar to The number of columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Matrix must be in sparse storage format. + + + + + Looks up a localized string similar to The number of rows of a matrix must be positive.. + + + + + Looks up a localized string similar to The number of rows or columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Unable to allocate native memory.. + + + + + Looks up a localized string similar to Only 1 and 2 dimensional arrays are supported.. + + + + + Looks up a localized string similar to Data must contain at least {0} values.. + + + + + Looks up a localized string similar to Name cannot contain a space. name: {0}. + + + + + Looks up a localized string similar to {0} is not a supported type.. + + + + + Looks up a localized string similar to Algorithm experience a numerical break down + . + + + + + Looks up a localized string similar to The two arguments can't be compared (maybe they are part of a partial ordering?). + + + + + Looks up a localized string similar to The integer array does not represent a valid permutation.. 
+ + + + + Looks up a localized string similar to The sampler's proposal distribution is not upper bounding the target density.. + + + + + Looks up a localized string similar to A regression of the requested order requires at least {0} samples. Only {1} samples have been provided. . + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds.. + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds. Consider to use RobustNewtonRaphson instead.. + + + + + Looks up a localized string similar to The lower and upper bounds must bracket a single root.. + + + + + Looks up a localized string similar to The algorithm ended without root in the range.. + + + + + Looks up a localized string similar to The number of rows must greater than or equal to the number of columns.. + + + + + Looks up a localized string similar to All sample vectors must have the same length. However, vectors with disagreeing length {0} and {1} have been provided. A sample with index i is given by the value at index i of each provided vector.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed. The {0}-th diagonal element of the factor U is zero.. + + + + + Looks up a localized string similar to The singular vectors were not computed.. + + + + + Looks up a localized string similar to This special case is not supported yet (but is planned).. + + + + + Looks up a localized string similar to The given stop criterion already exist in the collection.. + + + + + Looks up a localized string similar to There is no stop criterion in the collection.. + + + + + Looks up a localized string similar to String parameter cannot be empty or null.. + + + + + Looks up a localized string similar to We only support sparse matrix with less than int.MaxValue elements.. + + + + + Looks up a localized string similar to The moment of the distribution is undefined.. + + + + + Looks up a localized string similar to A user defined provider has not been specified.. + + + + + Looks up a localized string similar to User work buffers are not supported by this provider.. + + + + + Looks up a localized string similar to Vectors can not be empty and must have at least one element.. + + + + + Looks up a localized string similar to The given work array is too small. Check work[0] for the corret size.. + + +
+
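A short sketch of the trigonometry toolkit and window functions documented in the hunk above, assuming they are exposed through the Trig and Window static classes in the MathNet.Numerics namespace (the class names are inferred from the summaries; angle values and window widths are illustrative).

using System;
using MathNet.Numerics;

class TrigWindowSketch
{
    static void Main()
    {
        // Angle-unit conversions: degree (360), grad (400) and radian (2*Pi) periodic.
        double rad  = Trig.DegreeToRadian(60.0);
        double deg  = Trig.RadianToDegree(Math.PI / 3.0);
        double grad = Trig.DegreeToGrad(90.0);

        // Circular, inverse and hyperbolic functions.
        double s   = Trig.Sin(rad);
        double a   = Trig.Asin(0.5);
        double sh  = Trig.Sinh(1.0);
        double ash = Trig.Asinh(1.0);   // "Hyperbolic Area Sine" above; name assumed

        // Symmetric (filter design) vs. periodic (FFT) window variants.
        double[] hammingSymmetric = Window.Hamming(64);
        double[] hannPeriodic     = Window.HannPeriodic(64);

        Console.WriteLine($"{rad} {deg} {grad} {s} {a} {sh} {ash} " +
                          $"{hammingSymmetric[0]} {hannPeriodic[0]}");
    }
}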
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll new file mode 100644 index 0000000..a58d6db Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll differ diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML new file mode 100644 index 0000000..59edef1 --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML @@ -0,0 +1,49130 @@ + + + + MathNet.Numerics + + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. + Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. 
Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. + + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. 
+ An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. + + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. 
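The counting and random-sampling helpers from the Combinatorics section above, exercised in a brief sketch; the Combinatorics class name and the set sizes are assumptions for illustration only.

using System;
using MathNet.Numerics;

class CombinatoricsSketch
{
    static void Main()
    {
        // Counting: order matters for variations, not for combinations.
        double variations          = Combinatorics.Variations(5, 2);                 // 20
        double combinations        = Combinatorics.Combinations(5, 2);               // 10
        double combinationsWithRep = Combinatorics.CombinationsWithRepetition(5, 2); // 15
        double permutations        = Combinatorics.Permutations(5);                  // 120

        // Random sampling: a Fisher-Yates permutation of the indices 0..4,
        // using the default random source (null generator argument as documented).
        int[] permutation = Combinatorics.GeneratePermutation(5);

        Console.WriteLine($"{variations} {combinations} {combinationsWithRep} {permutations} " +
                          $"[{string.Join(",", permutation)}]");
    }
}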
+ + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. + + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. 
+ The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. + + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. + + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. 
+ + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a BigInteger int to a Complex32. + + The BigInteger value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. 
+ + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. 
+ + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). + + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + The number to perfom this operation on. + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + The number to perfom this operation on. + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + The number to perfom this operation on. + + true if this instance is real nonnegative number; otherwise, false. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Creates a complex number based on a string. 
The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. 
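A sketch combining the Complex32 value type and the Complex extension methods documented above, assuming the MathNet.Numerics namespace of the referenced package; the literal values and the parse format are illustrative.

using System;
using System.Numerics;
using MathNet.Numerics;

class ComplexSketch
{
    static void Main()
    {
        // Single-precision complex arithmetic with Complex32.
        var x = new Complex32(1f, 2f);
        var y = Complex32.FromPolarCoordinates(1f, (float)Math.PI);
        Complex32 z = (x + y) / (x - y);

        // Elementary functions on Complex32.
        Complex32 e    = Complex32.Exp(x);
        Complex32 sqr  = x.Square();
        Complex32 root = x.SquareRoot();

        // String round-trip in the '(n,n)' format listed for Parse/TryParse.
        Complex32 parsed;
        bool ok = Complex32.TryParse("(1, 2)", out parsed);

        // Extension methods on System.Numerics.Complex.
        Complex c     = new Complex(3.0, 4.0);
        double magSq  = c.MagnitudeSquared();   // 25
        Complex conj  = c.Conjugate();

        Console.WriteLine($"{z} {e} {sqr} {root} {ok} {parsed} {magSq} {conj}");
    }
}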
+ + + + + A collection of frequently used mathematical constants. + + + + The number e + + + The number log[2](e) + + + The number log[10](e) + + + The number log[e](2) + + + The number log[e](10) + + + The number log[e](pi) + + + The number log[e](2*pi)/2 + + + The number 1/e + + + The number sqrt(e) + + + The number sqrt(2) + + + The number sqrt(3) + + + The number sqrt(1/2) = 1/sqrt(2) = sqrt(2)/2 + + + The number sqrt(3)/2 + + + The number pi + + + The number pi*2 + + + The number pi/2 + + + The number pi*3/2 + + + The number pi/4 + + + The number sqrt(pi) + + + The number sqrt(2pi) + + + The number sqrt(2*pi*e) + + + The number log(sqrt(2*pi)) + + + The number log(sqrt(2*pi*e)) + + + The number log(2 * sqrt(e / pi)) + + + The number 1/pi + + + The number 2/pi + + + The number 1/sqrt(pi) + + + The number 1/sqrt(2pi) + + + The number 2/sqrt(pi) + + + The number 2 * sqrt(e / pi) + + + The number (pi)/180 - factor to convert from Degree (deg) to Radians (rad). + + + + + The number (pi)/200 - factor to convert from NewGrad (grad) to Radians (rad). + + + + + The number ln(10)/20 - factor to convert from Power Decibel (dB) to Neper (Np). Use this version when the Decibel represent a power gain but the compared values are not powers (e.g. amplitude, current, voltage). + + + The number ln(10)/10 - factor to convert from Neutral Decibel (dB) to Neper (Np). Use this version when either both or neither of the Decibel and the compared values represent powers. + + + The Catalan constant + Sum(k=0 -> inf){ (-1)^k/(2*k + 1)2 } + + + The Euler-Mascheroni constant + lim(n -> inf){ Sum(k=1 -> n) { 1/k - log(n) } } + + + The number (1+sqrt(5))/2, also known as the golden ratio + + + The Glaisher constant + e^(1/12 - Zeta(-1)) + + + The Khinchin constant + prod(k=1 -> inf){1+1/(k*(k+2))^log(k,2)} + + + + The size of a double in bytes. + + + + + The size of an int in bytes. + + + + + The size of a float in bytes. + + + + + The size of a Complex in bytes. + + + + + The size of a Complex in bytes. 
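A few of the mathematical constants listed above in use; the field names (Pi2, InvSqrt2Pi, GoldenRatio, Degree) are assumed from the summaries and should be treated as illustrative.

using System;
using MathNet.Numerics;

class ConstantsSketch
{
    static void Main()
    {
        // Frequently used mathematical constants exposed as double fields.
        double twoPi      = Constants.Pi2;          // 2*pi
        double invSqrt2Pi = Constants.InvSqrt2Pi;   // 1/sqrt(2*pi), handy for Gaussian densities
        double golden     = Constants.GoldenRatio;  // (1 + sqrt(5)) / 2

        // Unit-conversion factors, e.g. degrees to radians via the pi/180 factor.
        double rad = 45.0 * Constants.Degree;

        Console.WriteLine($"{twoPi} {invSqrt2Pi} {golden} {rad}");
    }
}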
+ + + + Speed of Light in Vacuum: c_0 = 2.99792458e8 [m s^-1] (defined, exact; 2007 CODATA) + + + Magnetic Permeability in Vacuum: mu_0 = 4*Pi * 10^-7 [N A^-2 = kg m A^-2 s^-2] (defined, exact; 2007 CODATA) + + + Electric Permittivity in Vacuum: epsilon_0 = 1/(mu_0*c_0^2) [F m^-1 = A^2 s^4 kg^-1 m^-3] (defined, exact; 2007 CODATA) + + + Characteristic Impedance of Vacuum: Z_0 = mu_0*c_0 [Ohm = m^2 kg s^-3 A^-2] (defined, exact; 2007 CODATA) + + + Newtonian Constant of Gravitation: G = 6.67429e-11 [m^3 kg^-1 s^-2] (2007 CODATA) + + + Planck's constant: h = 6.62606896e-34 [J s = m^2 kg s^-1] (2007 CODATA) + + + Reduced Planck's constant: h_bar = h / (2*Pi) [J s = m^2 kg s^-1] (2007 CODATA) + + + Planck mass: m_p = (h_bar*c_0/G)^(1/2) [kg] (2007 CODATA) + + + Planck temperature: T_p = (h_bar*c_0^5/G)^(1/2)/k [K] (2007 CODATA) + + + Planck length: l_p = h_bar/(m_p*c_0) [m] (2007 CODATA) + + + Planck time: t_p = l_p/c_0 [s] (2007 CODATA) + + + Elementary Electron Charge: e = 1.602176487e-19 [C = A s] (2007 CODATA) + + + Magnetic Flux Quantum: theta_0 = h/(2*e) [Wb = m^2 kg s^-2 A^-1] (2007 CODATA) + + + Conductance Quantum: G_0 = 2*e^2/h [S = m^-2 kg^-1 s^3 A^2] (2007 CODATA) + + + Josephson Constant: K_J = 2*e/h [Hz V^-1] (2007 CODATA) + + + Von Klitzing Constant: R_K = h/e^2 [Ohm = m^2 kg s^-3 A^-2] (2007 CODATA) + + + Bohr Magneton: mu_B = e*h_bar/2*m_e [J T^-1] (2007 CODATA) + + + Nuclear Magneton: mu_N = e*h_bar/2*m_p [J T^-1] (2007 CODATA) + + + Fine Structure Constant: alpha = e^2/4*Pi*e_0*h_bar*c_0 [1] (2007 CODATA) + + + Rydberg Constant: R_infty = alpha^2*m_e*c_0/2*h [m^-1] (2007 CODATA) + + + Bor Radius: a_0 = alpha/4*Pi*R_infty [m] (2007 CODATA) + + + Hartree Energy: E_h = 2*R_infty*h*c_0 [J] (2007 CODATA) + + + Quantum of Circulation: h/2*m_e [m^2 s^-1] (2007 CODATA) + + + Fermi Coupling Constant: G_F/(h_bar*c_0)^3 [GeV^-2] (2007 CODATA) + + + Weak Mixin Angle: sin^2(theta_W) [1] (2007 CODATA) + + + Electron Mass: [kg] (2007 CODATA) + + + Electron Mass Energy Equivalent: [J] (2007 CODATA) + + + Electron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Electron Compton Wavelength: [m] (2007 CODATA) + + + Classical Electron Radius: [m] (2007 CODATA) + + + Tomson Cross Section: [m^2] (2002 CODATA) + + + Electron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Electon G-Factor: [1] (2007 CODATA) + + + Muon Mass: [kg] (2007 CODATA) + + + Muon Mass Energy Equivalent: [J] (2007 CODATA) + + + Muon Molar Mass: [kg mol^-1] (2007 CODATA) + + + Muon Compton Wavelength: [m] (2007 CODATA) + + + Muon Magnetic Moment: [J T^-1] (2007 CODATA) + + + Muon G-Factor: [1] (2007 CODATA) + + + Tau Mass: [kg] (2007 CODATA) + + + Tau Mass Energy Equivalent: [J] (2007 CODATA) + + + Tau Molar Mass: [kg mol^-1] (2007 CODATA) + + + Tau Compton Wavelength: [m] (2007 CODATA) + + + Proton Mass: [kg] (2007 CODATA) + + + Proton Mass Energy Equivalent: [J] (2007 CODATA) + + + Proton Molar Mass: [kg mol^-1] (2007 CODATA) + + + Proton Compton Wavelength: [m] (2007 CODATA) + + + Proton Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton G-Factor: [1] (2007 CODATA) + + + Proton Shielded Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Proton Shielded Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Neutron Mass: [kg] (2007 CODATA) + + + Neutron Mass Energy Equivalent: [J] (2007 CODATA) + + + Neutron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Neuron Compton Wavelength: [m] (2007 CODATA) + + + Neutron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Neutron G-Factor: [1] 
(2007 CODATA) + + + Neutron Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Deuteron Mass: [kg] (2007 CODATA) + + + Deuteron Mass Energy Equivalent: [J] (2007 CODATA) + + + Deuteron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Deuteron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Helion Mass: [kg] (2007 CODATA) + + + Helion Mass Energy Equivalent: [J] (2007 CODATA) + + + Helion Molar Mass: [kg mol^-1] (2007 CODATA) + + + Avogadro constant: [mol^-1] (2010 CODATA) + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 + + + The SI prefix factor corresponding to 1 000 + + + The SI prefix factor corresponding to 100 + + + The SI prefix factor corresponding to 10 + + + The SI prefix factor corresponding to 0.1 + + + The SI prefix factor corresponding to 0.01 + + + The SI prefix factor corresponding to 0.001 + + + The SI prefix factor corresponding to 0.000 001 + + + The SI prefix factor corresponding to 0.000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 000 001 + + + + Sets parameters for the library. + + + + + Use a specific provider if configured, e.g. using + environment variables, or fall back to the best providers. + + + + + Use the best provider available. + + + + + Gets or sets a value indicating whether the distribution classes check validate each parameter. + For the multivariate distributions this could involve an expensive matrix factorization. + The default setting of this property is true. + + + + + Gets or sets a value indicating whether to use thread safe random number generators (RNG). + Thread safe RNG about two and half time slower than non-thread safe RNG. + + + true to use thread safe random number generators ; otherwise, false. + + + + + Optional path to try to load native provider binaries from. + + + + + Gets or sets the linear algebra provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets the fourier transform provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets a value indicating how many parallel worker threads shall be used + when parallelization is applicable. + + Default to the number of processor cores, must be between 1 and 1024 (inclusive). + + + + Gets or sets the TaskScheduler used to schedule the worker tasks. + + + + + Gets or sets the the block size to use for + the native linear algebra provider. + + The block size. Default 512, must be at least 32. + + + + Gets or sets the order of the matrix when linear algebra provider + must calculate multiply in parallel threads. + + The order. Default 64, must be at least 3. + + + + Gets or sets the number of elements a vector or matrix + must contain before we multiply threads. + + Number of elements. Default 300, must be at least 3. + + + + Numerical Derivative. 
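The Control settings documented above are global library switches; a minimal sketch of applying them once at startup, assuming the MathNet.Numerics 3.16.0 package this solution references (the library's Control members are used as documented; the setup wrapper itself is illustrative):

```csharp
using System;
using MathNet.Numerics;

static class NumericsSetup
{
    public static void Configure()
    {
        // Use the managed provider (no native MKL binaries required).
        Control.UseManaged();

        // Validate distribution parameters on construction (default: true).
        Control.CheckDistributionParameters = true;

        // Thread-safe RNGs are roughly 2.5x slower; keep them on unless profiling says otherwise.
        Control.ThreadSafeRandomNumberGenerators = true;

        // Cap the worker threads used by parallelized routines.
        Control.MaxDegreeOfParallelism = Environment.ProcessorCount;
    }
}
```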
+ + + + + Initialized a NumericalDerivative with the given points and center. + + + + + Initialized a NumericalDerivative with the default points and center for the given order. + + + + + Evaluates the derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + Derivative order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Univariate function handle. + Derivative order. + + + + Evaluates the first derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the first derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the second derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the second derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + + + + Evaluates the partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + + + + Class to calculate finite difference coefficients using Taylor series expansion method. + + + For n points, coefficients are calculated up to the maximum derivative order possible (n-1). + The current function value position specifies the "center" for surrounding coefficients. + Selecting the first, middle or last positions represent forward, backwards and central difference methods. + + + + + + + Number of points for finite difference coefficients. Changing this value recalculates the coefficients table. + + + + + Initializes a new instance of the class. + + Number of finite difference coefficients. + + + + Gets the finite difference coefficients for a specified center and order. + + Current function position with respect to coefficients. 
Must be within point range. + Order of finite difference coefficients. + Vector of finite difference coefficients. + + + + Gets the finite difference coefficients for all orders at a specified center. + + Current function position with respect to coefficients. Must be within point range. + Rectangular array of coefficients, with columns specifing order. + + + + Type of finite different step size. + + + + + The absolute step size value will be used in numerical derivatives, regardless of order or function parameters. + + + + + A base step size value, h, will be scaled according to the function input parameter. A common example is hx = h*(1+abs(x)), however + this may vary depending on implementation. This definition only guarantees that the only scaling will be relative to the + function input parameter and not the order of the finite difference derivative. + + + + + A base step size value, eps (typically machine precision), is scaled according to the finite difference coefficient order + and function input parameter. The initial scaling according to finite different coefficient order can be thought of as producing a + base step size, h, that is equivalent to scaling. This stepsize is then scaled according to the function + input parameter. Although implementation may vary, an example of second order accurate scaling may be (eps)^(1/3)*(1+abs(x)). + + + + + Class to evaluate the numerical derivative of a function using finite difference approximations. + Variable point and center methods can be initialized . + This class can also be used to return function handles (delegates) for a fixed derivative order and variable. + It is possible to evaluate the derivative and partial derivative of univariate and multivariate functions respectively. + + + + + Initializes a NumericalDerivative class with the default 3 point center difference method. + + + + + Initialized a NumericalDerivative class. + + Number of points for finite difference derivatives. + Location of the center with respect to other points. Value ranges from zero to points-1. + + + + Sets and gets the finite difference step size. This value is for each function evaluation if relative stepsize types are used. + If the base step size used in scaling is desired, see . + + + Setting then getting the StepSize may return a different value. This is not unusual since a user-defined step size is converted to a + base-2 representable number to improve finite difference accuracy. + + + + + Sets and gets the base fininte difference step size. This assigned value to this parameter is only used if is set to RelativeX. + However, if the StepType is Relative, it will contain the base step size computed from based on the finite difference order. + + + + + Sets and gets the base finite difference step size. This parameter is only used if is set to Relative. + By default this is set to machine epsilon, from which is computed. + + + + + Sets and gets the location of the center point for the finite difference derivative. + + + + + Number of times a function is evaluated for numerical derivatives. + + + + + Type of step size for computing finite differences. If set to absolute, dx = h. + If set to relative, dx = (1+abs(x))*h^(2/(order+1)). This provides accurate results when + h is approximately equal to the square-root of machine accuracy, epsilon. + + + + + Evaluates the derivative of equidistant points using the finite difference method. + + Vector of points StepSize apart. + Derivative order. + Finite difference step size. 
+ Derivative of points of the specified order. + + + + Evaluates the derivative of a scalar univariate function. + + + Supplying the optional argument currentValue will reduce the number of function evaluations + required to calculate the finite difference derivative. + + Function handle. + Point at which to compute the derivative. + Derivative order. + Current function value at center. + Function derivative at x of the specified order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Input function handle. + Derivative order. + Function handle that evaluates the derivative of input function at a fixed order. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Function partial derivative at x of the specified order. + + + + Evaluates the partial derivatives of a multivariate function array. + + + This function assumes the input vector x is of the correct length for f. + + Multivariate vector function array handle. + Vector at which to evaluate the derivatives. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Vector of functions partial derivatives at x of the specified order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at a fixed order. + + + + Creates a function handle for the partial derivative of a vector multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at fixed order. + + + + Evaluates the mixed partial derivative of variable order for multivariate functions. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function handle. + Points at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivative at x of the specified order. + + + + Evaluates the mixed partial derivative of variable order for multivariate function arrays. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function array handle. + Vector at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivatives at x of the specified order. + + + + Creates a function handle for the mixed partial derivative of a multivariate function. + + Input function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. 
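A hedged sketch of the NumericalDerivative API described above (MathNet.Numerics.Differentiation); the test functions and evaluation points are illustrative:

```csharp
using System;
using MathNet.Numerics.Differentiation;

class DerivativeSketch
{
    static void Main()
    {
        Func<double, double> f = x => Math.Sin(x);

        // 5-point finite difference with the center at index 2 (central difference).
        var nd = new NumericalDerivative(5, 2);

        double d1 = nd.EvaluateDerivative(f, Math.PI / 4, 1); // ~cos(pi/4)
        double d2 = nd.EvaluateDerivative(f, Math.PI / 4, 2); // ~-sin(pi/4)

        // Partial derivative of g(x, y) = x^2 * y with respect to y at (1, 2).
        Func<double[], double> g = v => v[0] * v[0] * v[1];
        double dy = nd.EvaluatePartialDerivative(g, new[] { 1.0, 2.0 }, 1, 1); // ~1

        Console.WriteLine(d1);
        Console.WriteLine(d2);
        Console.WriteLine(dy);
    }
}
```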
+ + + + Creates a function handle for the mixed partial derivative of a multivariate vector function. + + Input vector function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Resets the evaluation counter. + + + + + Class for evaluating the Hessian of a smooth continuously differentiable function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Hessian object with a three point central difference method. + + + + + Creates a numerical Hessian with a specified differentiation scheme. + + Number of points for Hessian evaluation. + Center point for differentiation. + + + + Evaluates the Hessian of the scalar univariate function f at points x. + + Scalar univariate function handle. + Point at which to evaluate Hessian. + Hessian tensor. + + + + Evaluates the Hessian of a multivariate function f at points x. + + + This method of computing the Hessian is only vaid for Lipschitz continuous functions. + The function mirrors the Hessian along the diagonal since d2f/dxdy = d2f/dydx for continuously differentiable functions. + + Multivariate function handle.> + Points at which to evaluate Hessian.> + Hessian tensor. + + + + Resets the function evaluation counter for the Hessian. + + + + + Class for evaluating the Jacobian of a function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Jacobian object with a three point central difference method. + + + + + Creates a numerical Jacobian with a specified differentiation scheme. + + Number of points for Jacobian evaluation. + Center point for differentiation. + + + + Evaluates the Jacobian of scalar univariate function f at point x. + + Scalar univariate function handle. + Point at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x. + + + This function assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x given a current function value. + + + To minimize the number of function evaluations, a user can supply the current value of the function + to be used in computing the Jacobian. This value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Current function value at finite difference center. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function array f at vector x. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Jacobian matrix. + + + + Evaluates the Jacobian of a multivariate function array f at vector x given a vector of current function values. + + + To minimize the number of function evaluations, a user can supply a vector of current values of the functions + to be used in computing the Jacobian. These value must correspond to the "center" location for the + finite differencing. 
If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Vector of current function values. + Jacobian matrix. + + + + Resets the function evaluation counter for the Jacobian. + + + + + Metrics to measure the distance between two structures. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Pearson's distance, i.e. 1 - the person correlation coefficient. + + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Discrete Univariate Bernoulli distribution. + The Bernoulli distribution is a distribution over bits. 
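Before the distribution classes that follow, a minimal usage sketch of the Distance metrics listed above; the sample vectors are arbitrary:

```csharp
using System;
using MathNet.Numerics;

class DistanceSketch
{
    static void Main()
    {
        double[] a = { 1.0, 2.0, 3.0 };
        double[] b = { 2.0, 4.0, 6.0 };

        Console.WriteLine(Distance.Euclidean(a, b));       // L2 norm of the difference
        Console.WriteLine(Distance.Manhattan(a, b));       // L1 norm
        Console.WriteLine(Distance.Chebyshev(a, b));       // infinity norm
        Console.WriteLine(Distance.Minkowski(3.0, a, b));  // generalized p-norm with p = 3
        Console.WriteLine(Distance.Cosine(a, b));          // 0 here: same direction, different scale
        Console.WriteLine(Distance.Hamming(a, b));         // number of positions with differing values
    }
}
```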
The parameter + p specifies the probability that a 1 is generated. + Wikipedia - Bernoulli distribution. + + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + If the Bernoulli parameter is not in the range [0,1]. + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + If the Bernoulli parameter is not in the range [0,1]. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Generates one sample from the Bernoulli distribution. + + The random source to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A random sample from the Bernoulli distribution. + + + + Samples a Bernoulli distributed random variable. + + A sample from the Bernoulli distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The random number generator to use. 
+ The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Continuous Univariate Beta distribution. + For details about this distribution, see + Wikipedia - Beta distribution. + + + There are a few special cases for the parameterization of the Beta distribution. When both + shape parameters are positive infinity, the Beta distribution degenerates to a point distribution + at 0.5. When one of the shape parameters is positive infinity, the distribution degenerates to a point + distribution at the positive infinity. When both shape parameters are 0.0, the Beta distribution + degenerates to a Bernoulli distribution with parameter 0.5. When one shape parameter is 0.0, the + distribution degenerates to a point distribution at the non-zero shape parameter. + + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + A string representation of the Beta distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Gets the α shape parameter of the Beta distribution. Range: α ≥ 0. + + + + + Gets the β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Beta distribution. + + + + + Gets the variance of the Beta distribution. + + + + + Gets the standard deviation of the Beta distribution. + + + + + Gets the entropy of the Beta distribution. + + + + + Gets the skewness of the Beta distribution. + + + + + Gets the mode of the Beta distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the Beta distribution. + + + + + Gets the minimum of the Beta distribution. + + + + + Gets the maximum of the Beta distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . 
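A minimal sketch of the Bernoulli members documented above; the probability 0.3 and the seed are illustrative:

```csharp
using System;
using MathNet.Numerics.Distributions;

class BernoulliSketch
{
    static void Main()
    {
        var coin = new Bernoulli(0.3);                        // P(X = 1) = 0.3
        Console.WriteLine(coin.Mean);                         // 0.3
        Console.WriteLine(coin.Probability(1));               // PMF at k = 1
        Console.WriteLine(coin.CumulativeDistribution(0.5));  // P(X <= 0.5) = 0.7

        int single = coin.Sample();                           // one draw: 0 or 1
        var draws = new int[10];
        coin.Samples(draws);                                  // fill an array with draws

        // Static sampling without constructing an instance.
        int quick = Bernoulli.Sample(new Random(42), 0.3);
        Console.WriteLine(single + quick);
    }
}
```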
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Beta distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Beta distribution. + + a sequence of samples from the distribution. + + + + Samples Beta distributed random variables by sampling two Gamma variables and normalizing. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a random number from the Beta distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. 
+ The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + + + + Create a Beta PERT distribution, used in risk analysis and other domains where an expert forecast + is used to construct an underlying beta distribution. + + The minimum value. + The maximum value. + The most likely value (mode). + The random number generator which is used to draw random samples. + The Beta distribution derived from the PERT parameters. + + + + A string representation of the distribution. + + A string representation of the BetaScaled distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the α shape parameter of the BetaScaled distribution. Range: α > 0. + + + + + Gets the β shape parameter of the BetaScaled distribution. Range: β > 0. + + + + + Gets the location (μ) of the BetaScaled distribution. + + + + + Gets the scale (σ) of the BetaScaled distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the BetaScaled distribution. + + + + + Gets the variance of the BetaScaled distribution. + + + + + Gets the standard deviation of the BetaScaled distribution. + + + + + Gets the entropy of the BetaScaled distribution. + + + + + Gets the skewness of the BetaScaled distribution. + + + + + Gets the mode of the BetaScaled distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the BetaScaled distribution. + + + + + Gets the minimum of the BetaScaled distribution. + + + + + Gets the maximum of the BetaScaled distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. 
ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. 
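A short sketch of the Beta and BetaScaled (PERT) members described above; the shape parameters and PERT estimates are illustrative, and an explicit Random instance is passed to match the documented parameter list:

```csharp
using System;
using MathNet.Numerics.Distributions;

class BetaSketch
{
    static void Main()
    {
        var beta = new Beta(2.0, 5.0);
        Console.WriteLine(beta.Mean);                               // alpha/(alpha+beta)
        Console.WriteLine(beta.Density(0.25));                      // PDF at 0.25
        Console.WriteLine(beta.CumulativeDistribution(0.25));       // CDF at 0.25
        Console.WriteLine(beta.InverseCumulativeDistribution(0.5)); // numeric InvCDF (documented as slow)

        // PERT-style expert estimate: min 10, max 30, most likely 15.
        var pert = BetaScaled.PERT(10.0, 30.0, 15.0, new Random(7));
        Console.WriteLine(pert.Mean);
        Console.WriteLine(pert.Sample());
    }
}
```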
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Binomial distribution. + For details about this distribution, see + Wikipedia - Binomial distribution. + + + The distribution is parameterized by a probability (between 0.0 and 1.0). + + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + If is not in the interval [0.0,1.0]. + If is negative. + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The random number generator which is used to draw random samples. + If is not in the interval [0.0,1.0]. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + + + + Gets the success probability in each trial. Range: 0 ≤ p ≤ 1. + + + + + Gets the number of trials. Range: n ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . 
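A minimal sketch of the Binomial API introduced above; the trial count and success probability are illustrative:

```csharp
using System;
using MathNet.Numerics.Distributions;

class BinomialSketch
{
    static void Main()
    {
        var bin = new Binomial(0.25, 20);                  // p = 0.25, n = 20 trials
        Console.WriteLine(bin.Mean);                       // n*p = 5
        Console.WriteLine(bin.Probability(5));             // P(X = 5)
        Console.WriteLine(bin.CumulativeDistribution(5));  // P(X <= 5)

        int successes = bin.Sample();                      // number of successes in 20 trials
        Console.WriteLine(successes);

        // Static form, without an instance.
        Console.WriteLine(Binomial.PMF(0.25, 20, 5));
    }
}
```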
+ + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the Binomial distribution without doing parameter checking. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successful trials. + + + + Samples a Binomially distributed random variable. + + The number of successes in N trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Binomially distributed random variables. + + a sequence of successes in N trials. + + + + Samples a binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Samples a binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Discrete Univariate Categorical distribution. + For details about this distribution, see + Wikipedia - Categorical distribution. This + distribution is sometimes called the Discrete distribution. 
+ + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + Support: 0..k where k = length(probability mass array)-1 + + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class from a . The distribution + will not be automatically updated when the histogram changes. The categorical distribution will have + one value for each bucket and a probability for that value proportional to the bucket count. + + The histogram from which to create the categorical variable. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Gets the probability mass vector (non-negative ratios) of the multinomial. + + Sometimes the normalized probability vector cannot be represented exactly in a floating point representation. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a . + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets he mode of the distribution. + + Throws a . + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
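A minimal sketch of the Categorical distribution described above, using an unnormalized ratio vector as the documentation allows:

```csharp
using System;
using MathNet.Numerics.Distributions;

class CategoricalSketch
{
    static void Main()
    {
        // Unnormalized ratios 1:2:7 over the support {0, 1, 2}.
        var cat = new Categorical(new[] { 1.0, 2.0, 7.0 });

        Console.WriteLine(cat.Probability(2));             // 0.7
        Console.WriteLine(cat.CumulativeDistribution(1));  // P(X <= 1) = 0.3

        int k = cat.Sample();                              // 0, 1 or 2
        Console.WriteLine(k);

        // Static sampling straight from a ratio vector.
        Console.WriteLine(Categorical.Sample(new Random(1), new[] { 1.0, 2.0, 7.0 }));
    }
}
```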
+ + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. + + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. 
+ random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. + + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. 
Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. 
+ The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
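Editor's note: the sampling and density members documented above follow one pattern — instance Sample/Samples plus static overloads taking a System.Random. A minimal C# sketch of the Categorical and Cauchy usage, assuming these comments belong to the identically named classes in MathNet.Numerics.Distributions (the namespace itself is not named in this diff):

using System;
using MathNet.Numerics.Distributions;

class CategoricalCauchyDemo
{
    static void Main()
    {
        var rng = new Random(42);

        // Categorical over unnormalized ratios; samples are indices in 0..3 (upper bound exclusive).
        var weights = new[] { 1.0, 2.0, 4.0, 1.0 };
        var categorical = new Categorical(weights, rng);
        int index = categorical.Sample();

        // Cauchy with location x0 = 0 and scale γ = 1: instance PDF/CDF plus one static draw.
        var cauchy = new Cauchy(0.0, 1.0, rng);
        double density = cauchy.Density(0.5);
        double cumulative = cauchy.CumulativeDistribution(0.5);
        double draw = Cauchy.Sample(rng, 0.0, 1.0);

        Console.WriteLine("index={0} pdf={1:F4} cdf={2:F4} draw={3:F4}", index, density, cumulative, draw);
    }
}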
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. 
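Editor's note: the Chi and Chi-Squared members documented above can be exercised as below — a sketch only, and it assumes the v3 class name ChiSquared (older releases and some of the member text above say ChiSquare).

using System;
using MathNet.Numerics.Distributions;

class ChiDemo
{
    static void Main()
    {
        var rng = new Random(1);

        // Chi with k = 3 degrees of freedom: the length of a 3-d standard-normal vector.
        var chi = new Chi(3.0, rng);
        double chiSample = chi.Sample();

        // Chi-squared with k = 3: the sum of squares of 3 standard normals.
        var chiSq = new ChiSquared(3.0, rng);
        double p = chiSq.CumulativeDistribution(chiSample * chiSample);

        // Static helpers mirror the instance members documented above.
        double another = ChiSquared.Sample(rng, 3.0);

        Console.WriteLine("chi={0:F3} P(X2<=chi^2)={1:F3} chi2 draw={2:F3}", chiSample, p, another);
    }
}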
+ + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . 
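Editor's note: a short sketch of the ContinuousUniform members documented above. The instance method name InverseCumulativeDistribution for the InvCDF member is an assumption; only the behaviour is stated in the doc text.

using System;
using MathNet.Numerics.Distributions;

class UniformDemo
{
    static void Main()
    {
        var rng = new Random(7);

        // Uniform on [2, 5]; the constructor requires lower <= upper.
        var uniform = new ContinuousUniform(2.0, 5.0, rng);

        double x = uniform.Sample();                              // one draw
        double cdf = uniform.CumulativeDistribution(3.5);         // P(X <= 3.5) = 0.5
        double q90 = uniform.InverseCumulativeDistribution(0.9);  // 90% quantile = 4.7

        Console.WriteLine("x={0:F3} cdf={1:F3} q90={2:F3}", x, cdf, q90);
    }
}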
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. 
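Editor's note: several members above read "Fills an array with samples generated from the distribution" and take a random generator, a target array, and the distribution parameters. The doc text does not give the method name; the static Samples(...) overload used below is an assumption about that API, shown here for ContinuousUniform only as a sketch of the fill pattern.

using System;
using MathNet.Numerics.Distributions;

class FillDemo
{
    static void Main()
    {
        var rng = new Random(3);
        var values = new double[1000];

        // Fill the whole buffer in one call instead of drawing sample by sample.
        ContinuousUniform.Samples(rng, values, 0.0, 1.0);

        double mean = 0.0;
        for (int i = 0; i < values.Length; i++) mean += values[i];
        mean /= values.Length;

        Console.WriteLine("sample mean ~ {0:F3} (expected 0.5)", mean);
    }
}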
+ + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. + + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. 
Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. + The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. 
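Editor's note: a hedged sketch of the Conway-Maxwell-Poisson and Dirichlet members documented above, assuming the usual discrete-distribution Probability(k) accessor and a Dirichlet Sample() that returns a probability vector; neither name is spelled out in the doc text itself.

using System;
using MathNet.Numerics.Distributions;

class CmpDirichletDemo
{
    static void Main()
    {
        var rng = new Random(11);

        // Conway-Maxwell-Poisson with lambda = 2, nu = 1 reduces to Poisson(2);
        // the normalization constant is cached internally, as noted above.
        var cmp = new ConwayMaxwellPoisson(2.0, 1.0, rng);
        double pTwo = cmp.Probability(2);   // P(X = 2)
        int draw = cmp.Sample();

        // Dirichlet over 3 categories; a sample is a vector on the simplex (sums to 1).
        var dirichlet = new Dirichlet(new[] { 1.0, 2.0, 3.0 }, rng);
        double[] simplexPoint = dirichlet.Sample();

        Console.WriteLine("P(X=2)={0:F3} draw={1} p=({2:F2},{3:F2},{4:F2})",
            pTwo, draw, simplexPoint[0], simplexPoint[1], simplexPoint[2]);
    }
}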
+ + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. 
Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
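Editor's note: the DiscreteUniform members documented above use inclusive bounds, so a fair die is simply the range 1..6. A minimal sketch under the same class-name assumption as the earlier notes:

using System;
using MathNet.Numerics.Distributions;

class DieDemo
{
    static void Main()
    {
        var rng = new Random(5);

        // A fair six-sided die: inclusive bounds 1..6.
        var die = new DiscreteUniform(1, 6, rng);

        int roll = die.Sample();
        double pmf = die.Probability(3);             // 1/6
        double cdf = die.CumulativeDistribution(3);  // 3/6

        // Equivalent static call without constructing the distribution object.
        int roll2 = DiscreteUniform.Sample(rng, 1, 6);

        Console.WriteLine("roll={0} pmf={1:F3} cdf={2:F3} roll2={3}", roll, pmf, cdf, roll2);
    }
}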
+ + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. 
+ The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . 
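Editor's note: the Erlang and Exponential members documented above relate naturally (an Erlang(k, λ) variable is a sum of k Exponential(λ) waiting times). A sketch, again assuming the InverseCumulativeDistribution name for the InvCDF member:

using System;
using MathNet.Numerics.Distributions;

class WaitingTimeDemo
{
    static void Main()
    {
        var rng = new Random(9);

        // Exponential with rate lambda = 0.5 (mean 2.0).
        var exponential = new Exponential(0.5, rng);
        double median = exponential.InverseCumulativeDistribution(0.5); // ln(2)/lambda ~ 1.386
        double pWithin3 = exponential.CumulativeDistribution(3.0);      // 1 - e^(-1.5)

        // Erlang(k = 3, lambda = 0.5): sum of three such exponential waiting times.
        var erlang = new Erlang(3, 0.5, rng);
        double draw = erlang.Sample();

        Console.WriteLine("median={0:F3} P(X<=3)={1:F3} erlang draw={2:F3}", median, pWithin3, draw);
    }
}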
+ + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. 
+ The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. 
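Editor's note: a sketch of the F-distribution (FisherSnedecor) members documented above, e.g. for a variance-ratio test. The instance InverseCumulativeDistribution call is an assumed name for the InvCDF member, which the docs above warn is not an explicit implementation and hence slow.

using System;
using MathNet.Numerics.Distributions;

class FTestDemo
{
    static void Main()
    {
        // F(d1 = 5, d2 = 10).
        var f = new FisherSnedecor(5.0, 10.0);

        double observedRatio = 3.2;
        double pValue = 1.0 - f.CumulativeDistribution(observedRatio); // right-tail probability

        // Use the quantile function sparingly, per the warning above.
        double critical = f.InverseCumulativeDistribution(0.95);

        Console.WriteLine("p-value={0:F4} critical(0.95)={1:F3}", pValue, critical);
    }
}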
+ + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. 
+ + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
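Editor's note: the Gamma members documented above are parameterized by shape (α) and rate (β), with sampling based on the Marsaglia & Tsang method cited in the remarks. A minimal usage sketch:

using System;
using MathNet.Numerics.Distributions;

class GammaDemo
{
    static void Main()
    {
        var rng = new Random(21);

        // Gamma with shape alpha = 2 and rate beta = 3 (mean alpha/beta ~ 0.667).
        var gamma = new Gamma(2.0, 3.0, rng);

        double draw = gamma.Sample();
        double pdf = gamma.Density(0.5);
        double q99 = gamma.InverseCumulativeDistribution(0.99);

        // Static form, handy when no distribution object is kept around.
        double draw2 = Gamma.Sample(rng, 2.0, 3.0);

        Console.WriteLine("draw={0:F3} pdf(0.5)={1:F3} q99={2:F3} draw2={3:F3}", draw, pdf, q99, draw2);
    }
}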
+ + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). 
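Editor's note: the Geometric members documented above use the "number of trials until the first success" parameterization, so samples start at 1 and never include 0. A sketch:

using System;
using MathNet.Numerics.Distributions;

class GeometricDemo
{
    static void Main()
    {
        var rng = new Random(13);

        // Geometric with success probability p = 0.25.
        var geometric = new Geometric(0.25, rng);

        int trials = geometric.Sample();                    // >= 1 by construction
        double pFirstTry = geometric.Probability(1);        // = p = 0.25
        double pWithin4 = geometric.CumulativeDistribution(4.0);

        Console.WriteLine("trials={0} P(X=1)={1:F2} P(X<=4)={2:F3}", trials, pFirstTry, pWithin4);
    }
}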
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). 
+ The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. 
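The "Continuous Univariate Probability Distribution" and "Discrete Univariate Probability Distribution" entries above describe the shared members that every distribution in this file exposes, so calling code can be written once against the interface rather than per distribution. A hedged sketch, assuming those interfaces are IContinuousDistribution and IDiscreteDistribution from MathNet.Numerics.Distributions, with the Hypergeometric and InverseGamma parameters documented above filled in with arbitrary values:

```csharp
// Sketch only: assumed interface names and arbitrary parameter values.
using System;
using MathNet.Numerics.Distributions;

class DistributionInterfaceSketch
{
    // Works for any discrete distribution: Hypergeometric, Geometric, Poisson, ...
    static void DescribeDiscrete(IDiscreteDistribution d, int k)
    {
        Console.WriteLine(d.Probability(k));            // P(X = k)
        Console.WriteLine(d.CumulativeDistribution(k)); // P(X <= k)
        Console.WriteLine(d.Mode);
    }

    // Works for any continuous distribution: InverseGamma, Normal, Laplace, ...
    static void DescribeContinuous(IContinuousDistribution d, double x)
    {
        Console.WriteLine(d.Density(x));   // PDF at x
        Console.WriteLine(d.DensityLn(x)); // lnPDF at x
        Console.WriteLine(d.Sample());     // one random draw
    }

    static void Main()
    {
        // Hypergeometric: population N = 50, successes K = 10, draws n = 5.
        DescribeDiscrete(new Hypergeometric(50, 10, 5), 2);
        // Inverse Gamma with shape alpha = 3 and scale beta = 2.
        DescribeContinuous(new InverseGamma(3.0, 2.0), 0.5);
    }
}
```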
+ + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. 
+ Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. 
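For the Laplace distribution, the instance members (Density, DensityLn, CumulativeDistribution) and the static overloads documented above can be exercised as below. This is a sketch under the assumption that these comments describe the MathNet.Numerics.Distributions API; location 0, scale 1, the seed and the buffer size are arbitrary, and the name of the array-filling overload is inferred from the "Fills an array with samples" entries.

```csharp
// Sketch only: assumed API and arbitrary parameter values.
using System;
using MathNet.Numerics.Distributions;

class LaplaceSketch
{
    static void Main()
    {
        var laplace = new Laplace(0.0, 1.0);                     // location mu, scale b
        Console.WriteLine(laplace.Density(0.5));                 // PDF at 0.5
        Console.WriteLine(laplace.DensityLn(0.5));               // lnPDF at 0.5
        Console.WriteLine(laplace.CumulativeDistribution(0.5));  // CDF at 0.5

        // Static overloads with explicit location/scale parameters.
        Console.WriteLine(Laplace.PDF(0.0, 1.0, 0.5));
        Console.WriteLine(Laplace.CDF(0.0, 1.0, 0.5));

        // "Fills an array with samples": the buffer variant avoids allocating
        // a fresh array for every batch of draws.
        var rnd = new Random(7);
        var buffer = new double[1000];
        Laplace.Samples(rnd, buffer, 0.0, 1.0);
        Console.WriteLine(buffer[0]);
    }
}
```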
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. 
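The log-normal entries above describe three ways to obtain a distribution object: direct parameterization by the log-scale mu and shape sigma, construction from the desired mean and variance of the distribution itself, and a maximum-likelihood fit from sample data (the MATLAB lognfit analogue). A hedged sketch, assuming the MathNet.Numerics.Distributions API and made-up sample data:

```csharp
// Sketch only: assumed API, made-up data.
using System;
using MathNet.Numerics.Distributions;

class LogNormalSketch
{
    static void Main()
    {
        // Parameterized by the log-scale mu and shape sigma of the underlying normal.
        var byParameters = new LogNormal(0.0, 0.5);

        // Parameterized by the desired mean and variance of the log-normal itself.
        var byMoments = LogNormal.WithMeanVariance(2.0, 1.5);

        // Maximum-likelihood estimate from observed samples (cf. MATLAB lognfit).
        var fitted = LogNormal.Estimate(new[] { 1.2, 0.8, 2.5, 1.9, 1.1, 3.4 });

        Console.WriteLine(byParameters.Mean);
        Console.WriteLine(byMoments.Mu);
        Console.WriteLine(fitted.Sigma);
        Console.WriteLine(byMoments.InverseCumulativeDistribution(0.95)); // quantile
    }
}
```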
+ + + + + Gets the maximum of the log-normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the density at . + + MATLAB: lognpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: logncdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: logninv + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. 
Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Multivariate Matrix-valued Normal distributions. The distribution + is parameterized by a mean matrix (M), a covariance matrix for the rows (V) and a covariance matrix + for the columns (K). If the dimension of M is d-by-m then V is d-by-d and K is m-by-m. + Wikipedia - MatrixNormal distribution. + + + + + The mean of the matrix normal distribution. + + + + + The covariance matrix for the rows. + + + + + The covariance matrix for the columns. + + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + The random number generator which is used to draw random samples. + If the dimensions of the mean and two covariance matrices don't match. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + + + + Gets the mean. (M) + + The mean of the distribution. + + + + Gets the row covariance. (V) + + The row covariance. + + + + Gets the column covariance. (K) + + The column covariance. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Evaluates the probability density function for the matrix normal distribution. + + The matrix at which to evaluate the density at. + the density at + If the argument does not have the correct dimensions. + + + + Samples a matrix normal distributed random variable. + + A random number from this distribution. + + + + Samples a matrix normal distributed random variable. + + The random number generator to use. + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + a sequence of samples from the distribution. + + + + Samples a vector normal distributed random variable. + + The random number generator to use. + The mean of the vector normal distribution. + The covariance matrix of the vector normal distribution. + a sequence of samples from defined distribution. + + + + Multivariate Multinomial distribution. For details about this distribution, see + Wikipedia - Multinomial distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + + + Stores the normalized multinomial probabilities. + + + + + The number of trials. + + + + + Initializes a new instance of the Multinomial class. 
+ + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class from histogram . The distribution will + not be automatically updated when the histogram changes. + + Histogram instance + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative returns false, + if the sum of parameters is 0.0, or if the number of trials is negative; otherwise true. + + + + Gets the proportion of ratios. + + + + + Gets the number of trials. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Computes values of the probability mass function. + + Non-negative integers x1, ..., xk + The probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Computes values of the log probability mass function. + + Non-negative integers x1, ..., xk + The log probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Samples one multinomial distributed random variable. + + the counts for each of the different possible values. + + + + Samples a sequence multinomially distributed random variables. + + a sequence of counts for each of the different possible values. + + + + Samples one multinomial distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + the counts for each of the different possible values. + + + + Samples a multinomially distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of variables needed. + a sequence of counts for each of the different possible values. + + + + Discrete Univariate Negative Binomial distribution. + The negative binomial is a distribution over the natural numbers with two parameters r, p. For the special + case that r is an integer one can interpret the distribution as the number of failures before the r'th success + when the probability of success is p. + Wikipedia - NegativeBinomial distribution. + + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. 
Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Gets the number of successes. Range: r ≥ 0. + + + + + Gets the probability of success. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Samples a negative binomial distributed random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + a sample from the distribution. + + + + Samples a NegativeBinomial distributed random variable. + + a sample from the distribution. 
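The negative binomial members above follow the same pattern as the other discrete distributions: instance PMF/CDF plus static sampling with explicit r and p. A minimal sketch, assuming the MathNet.Numerics.Distributions API, with arbitrary r = 5 and p = 0.4:

```csharp
// Sketch only: assumed API and arbitrary parameter values.
using System;
using MathNet.Numerics.Distributions;

class NegativeBinomialSketch
{
    static void Main()
    {
        var negBin = new NegativeBinomial(5.0, 0.4);
        Console.WriteLine(negBin.Probability(7));               // P(X = 7), failures before the 5th success
        Console.WriteLine(negBin.CumulativeDistribution(7.0));  // P(X <= 7)
        Console.WriteLine(negBin.Mean);

        // Static sampling with explicit parameters, as documented above.
        var rnd = new Random(3);
        Console.WriteLine(NegativeBinomial.Sample(rnd, 5.0, 0.4));
    }
}
```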
+ + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of NegativeBinomial distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Continuous Univariate Normal distribution, also known as Gaussian distribution. + For details about this distribution, see + Wikipedia - Normal distribution. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a normal distribution from a mean and standard deviation. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + a normal distribution. + + + + Constructs a normal distribution from a mean and variance. + + The mean (μ) of the normal distribution. + The variance (σ^2) of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. 
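The normal-distribution entries in this part of the file expose several construction routes (mean/standard deviation, mean/variance, mean/precision) plus a maximum-likelihood Estimate from sample data (the MATLAB normfit analogue). A hedged usage sketch, assuming the MathNet.Numerics.Distributions API; all numeric values are arbitrary:

```csharp
// Sketch only: assumed API, arbitrary numbers.
using System;
using MathNet.Numerics.Distributions;

class NormalSketch
{
    static void Main()
    {
        var standard = new Normal();                          // mean 0, standard deviation 1
        var byStdDev = new Normal(10.0, 2.0);                 // mean, standard deviation
        var byVariance = Normal.WithMeanVariance(10.0, 4.0);  // mean, sigma^2

        Console.WriteLine(byStdDev.Density(9.0));                         // PDF
        Console.WriteLine(byVariance.CumulativeDistribution(12.0));       // CDF
        Console.WriteLine(standard.InverseCumulativeDistribution(0.975)); // quantile, ~1.96

        // Maximum-likelihood fit from samples (cf. MATLAB normfit).
        var fitted = Normal.Estimate(new[] { 9.8, 10.1, 10.4, 9.6, 10.0 });
        Console.WriteLine(fitted.StdDev);
    }
}
```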
+ + + + Constructs a normal distribution from a mean and precision. + + The mean (μ) of the normal distribution. + The precision of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Estimates the normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + MATLAB: normfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Gets the mean (μ) of the normal distribution. + + + + + Gets the standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + + Gets the variance of the normal distribution. + + + + + Gets the precision of the normal distribution. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the entropy of the normal distribution. + + + + + Gets the skewness of the normal distribution. + + + + + Gets the mode of the normal distribution. + + + + + Gets the median of the normal distribution. + + + + + Gets the minimum of the normal distribution. + + + + + Gets the maximum of the normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the density at . + + MATLAB: normpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The mean (μ) of the normal distribution. 
+ The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: normcdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: norminv + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + This structure represents the type over which the distribution + is defined. + + + + + The mean value. + + + + + The precision value. + + + + + Initializes a new instance of the struct. + + The mean of the pair. + The precision of the pair. + + + + Gets or sets the mean of the pair. + + + + + Gets or sets the precision of the pair. + + + + + Multivariate Normal-Gamma Distribution. + The distribution is the conjugate prior distribution for the + distribution. It specifies a prior over the mean and precision of the distribution. + It is parameterized by four numbers: the mean location, the mean scale, the precision shape and the + precision inverse scale. + The distribution NG(mu, tau | mloc,mscale,psscale,pinvscale) = Normal(mu | mloc, 1/(mscale*tau)) * Gamma(tau | psscale,pinvscale). + The following degenerate cases are special: when the precision is known, + the precision shape will encode the value of the precision while the precision inverse scale is positive + infinity. When the mean is known, the mean location will encode the value of the mean while the scale + will be positive infinity. A completely degenerate NormalGamma distribution with known mean and precision is possible as well. + Wikipedia - Normal-Gamma distribution. + + + + + Initializes a new instance of the class. 
+ + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Gets the location of the mean. + + + + + Gets the scale of the mean. + + + + + Gets the shape of the precision. + + + + + Gets the inverse scale of the precision. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Returns the marginal distribution for the mean of the NormalGamma distribution. + + the marginal distribution for the mean of the NormalGamma distribution. + + + + Returns the marginal distribution for the precision of the distribution. + + The marginal distribution for the precision of the distribution/ + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the variance of the distribution. + + The mean of the distribution. + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + Density value + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + Density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + The log of the density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + The log of the density value + + + + Generates a sample from the NormalGamma distribution. + + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + a sequence of samples from the distribution. + + + + Generates a sample from the NormalGamma distribution. + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sequence of samples from the distribution. + + + + Continuous Univariate Pareto distribution. + The Pareto distribution is a power law probability distribution that coincides with social, + scientific, geophysical, actuarial, and many other types of observable phenomena. + For details about this distribution, see + Wikipedia - Pareto distribution. + + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + If or are negative. + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The random number generator which is used to draw random samples. 
+ If or are negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + + + + Gets the scale (xm) of the distribution. Range: xm > 0. + + + + + Gets the shape (α) of the distribution. Range: α > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Pareto distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. 
+ The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Poisson distribution. + + + Distribution is described at Wikipedia - Poisson distribution. + Knuth's method is used to generate Poisson distributed random variables. + f(x) = exp(-λ)*λ^x/x!; + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + If is equal or less then 0.0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + If is equal or less then 0.0. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + + + + Gets the Poisson distribution parameter λ. Range: λ > 0. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. 
+ the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Generates one sample from the Poisson distribution. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by Knuth's method. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by "Rejection method PA". + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson, + Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) + The article is on pages 29-35. The algorithm given here is on page 32. + + + + Samples a Poisson distributed random variable. + + A sample from the Poisson distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Poisson distributed random variables. + + a sequence of successes in N trials. + + + + Samples a Poisson distributed random variable. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Samples a Poisson distributed random variable. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Rayleigh distribution. + The Rayleigh distribution (pronounced /ˈreɪli/) is a continuous probability distribution. 
As an + example of how it arises, the wind speed will have a Rayleigh distribution if the components of + the two-dimensional wind velocity vector are uncorrelated and normally distributed with equal variance. + For details about this distribution, see + Wikipedia - Rayleigh distribution. + + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + If is negative. + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the scale (σ) of the distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Rayleigh distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (σ) of the distribution. Range: σ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. 
+ The scale (σ) of the distribution. Range: σ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Stable distribution. + A random variable is said to be stable (or to have a stable distribution) if it has + the property that a linear combination of two independent copies of the variable has + the same distribution, up to location and scale parameters. + For details about this distribution, see + Wikipedia - Stable distribution. + + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Gets the stability (α) of the distribution. Range: 2 ≥ α > 0. + + + + + Gets The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + + + + + Gets the scale (c) of the distribution. Range: c > 0. + + + + + Gets the location (μ) of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets he entropy of the distribution. + + Always throws a not supported exception. + + + + Gets the skewness of the distribution. + + Throws a not supported exception of Alpha != 2. + + + + Gets the mode of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the median of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. 
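Before the remaining Stable distribution members continue below, the following sketch shows how the Poisson and Rayleigh members documented above are typically called. It is illustrative only and not part of the patch; it assumes the MathNet.Numerics 3.16.0 package referenced elsewhere in this changeset, and the class and variable names are invented for the example.

```csharp
// Illustrative sketch (not part of the patch): exercising the Poisson and
// Rayleigh members documented above. Assumes MathNet.Numerics 3.16.0.
using System;
using MathNet.Numerics.Distributions;

class PoissonRayleighSketch
{
    static void Main()
    {
        var poisson = new Poisson(4.0);                          // λ = 4
        Console.WriteLine(poisson.Probability(2));               // PMF: P(X = 2)
        Console.WriteLine(poisson.CumulativeDistribution(3));    // CDF: P(X ≤ 3)
        Console.WriteLine(Poisson.Sample(new Random(42), 4.0));  // static sampling overload

        var rayleigh = new Rayleigh(1.5);                        // scale σ = 1.5
        Console.WriteLine(rayleigh.Density(1.0));                // PDF at x = 1
        Console.WriteLine(rayleigh.InverseCumulativeDistribution(0.5)); // median via InvCDF
    }
}
```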
+ + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + Throws a not supported exception if Alpha != 2, (Alpha != 1 and Beta !=0), or (Alpha != 0.5 and Beta != 1) + + + + Samples the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a random number from the distribution. + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Stable distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. 
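A minimal usage sketch of the Stable sampling helpers documented above (illustrative only, not part of the patch; assumes MathNet.Numerics 3.16.0, names invented for the example):

```csharp
// Illustrative sketch (not part of the patch): the alpha-stable samplers documented above.
using System;
using MathNet.Numerics.Distributions;

class StableSketch
{
    static void Main()
    {
        var rng = new Random(1);

        // α = 2 is the Gaussian special case (the skewness β is then irrelevant).
        double gaussianLike = Stable.Sample(rng, 2.0, 0.0, 1.0, 0.0);

        // α = 1, β = 0 is the Cauchy special case: heavy tails, undefined mean.
        double cauchyLike = Stable.Sample(rng, 1.0, 0.0, 1.0, 0.0);

        // Instance form: stability 1.5, skewness 0.5, scale 1, location 0.
        var stable = new Stable(1.5, 0.5, 1.0, 0.0);
        Console.WriteLine($"{gaussianLike} {cauchyLike} {stable.Sample()}");
    }
}
```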
+ + + + Generates a sample from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Continuous Univariate Student's T-distribution. + Implements the univariate Student t-distribution. For details about this + distribution, see + + Wikipedia - Student's t-distribution. + + We use a slightly generalized version (compared to + Wikipedia) of the Student t-distribution. Namely, one which also + parameterizes the location and scale. See the book "Bayesian Data + Analysis" by Gelman et al. for more details. + The density of the Student t-distribution p(x|mu,scale,dof) = + Gamma((dof+1)/2) (1 + (x - mu)^2 / (scale * scale * dof))^(-(dof+1)/2) / + (Gamma(dof/2)*Sqrt(dof*pi*scale)). + The distribution will use the by + default. Users can get/set the random number generator by using the + property. + The statistics classes will check all the incoming parameters + whether they are in the allowed range. This might involve heavy + computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the StudentT class. This is a Student t-distribution with location 0.0 + scale 1.0 and degrees of freedom 1. + + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Gets the location (μ) of the Student t-distribution. + + + + + Gets the scale (σ) of the Student t-distribution. Range: σ > 0. + + + + + Gets the degrees of freedom (ν) of the Student t-distribution. Range: ν > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Student t-distribution. + + + + + Gets the variance of the Student t-distribution. 
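As a quick reference for the generalized (location/scale) Student t-distribution described above, here is an illustrative sketch (not part of the patch; assumes MathNet.Numerics 3.16.0):

```csharp
// Illustrative sketch (not part of the patch): the location/scale/dof form of StudentT.
using System;
using MathNet.Numerics.Distributions;

class StudentTSketch
{
    static void Main()
    {
        var t = new StudentT(0.0, 1.0, 5.0);  // location μ = 0, scale σ = 1, ν = 5
        Console.WriteLine(t.Mean);            // 0 (defined because ν > 1)
        Console.WriteLine(t.Variance);        // ν/(ν − 2) = 5/3 (defined because ν > 2)
        Console.WriteLine(t.Density(0.0));    // PDF evaluated at the location parameter
    }
}
```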
+ + + + + Gets the standard deviation of the Student t-distribution. + + + + + Gets the entropy of the Student t-distribution. + + + + + Gets the skewness of the Student t-distribution. + + + + + Gets the mode of the Student t-distribution. + + + + + Gets the median of the Student t-distribution. + + + + + Gets the minimum of the Student t-distribution. + + + + + Gets the maximum of the Student t-distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Samples student-t distributed random variables. + + The algorithm is method 2 in section 5, chapter 9 + in L. Devroye's "Non-Uniform Random Variate Generation" + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a random number from the standard student-t distribution. + + + + Generates a sample from the Student t-distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Student t-distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the inverse cumulative density at . 
+ + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Student t-distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Student t-distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Triangular distribution. + For details, see Wikipedia - Triangular distribution. + + The distribution will use the by default. + Users can get/set the random number generator by using the property. + The statistics classes will check whether all the incoming parameters are in the allowed range. This might involve heavy computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The random number generator which is used to draw random samples. + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + + + + Gets the lower bound of the distribution. 
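The static StudentT.CDF/InvCDF helpers documented above are what one typically reaches for when computing critical values; the Triangular members continue below. An illustrative sketch, not part of the patch (note the documentation's own warning that InvCDF is not an explicit implementation and can be slow):

```csharp
// Illustrative sketch (not part of the patch): critical values via the static StudentT helpers.
using System;
using MathNet.Numerics.Distributions;

class StudentTStaticSketch
{
    static void Main()
    {
        // Two-sided 95% critical value for ν = 10 (location 0, scale 1): ≈ 2.228.
        double tCrit = StudentT.InvCDF(0.0, 1.0, 10.0, 0.975);
        Console.WriteLine(tCrit);

        // Round-trip through the CDF: ≈ 0.975.
        Console.WriteLine(StudentT.CDF(0.0, 1.0, 10.0, tCrit));
    }
}
```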
+ + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. 
+ + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. 
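Before the multivariate Wishart members below, here is an illustrative sketch of the Triangular and Weibull helpers documented above (not part of the patch; assumes MathNet.Numerics 3.16.0 and the usual distribution-parameters-before-x argument order of the static helpers):

```csharp
// Illustrative sketch (not part of the patch): Triangular and Weibull helpers.
using System;
using MathNet.Numerics.Distributions;

class TriangularWeibullSketch
{
    static void Main()
    {
        // Triangular with lower = 0, upper = 10, mode = 3 (lower ≤ mode ≤ upper).
        Console.WriteLine(Triangular.CDF(0.0, 10.0, 3.0, 5.0));             // P(X ≤ 5)
        Console.WriteLine(Triangular.Sample(new Random(7), 0.0, 10.0, 3.0));

        // Weibull with shape k = 1.5 and scale λ = 2.
        var weibull = new Weibull(1.5, 2.0);
        Console.WriteLine(weibull.Mean);                // λ·Γ(1 + 1/k)
        Console.WriteLine(Weibull.CDF(1.5, 2.0, 2.0));  // P(X ≤ λ) = 1 − 1/e ≈ 0.632
    }
}
```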
+ + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. + + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. 
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. 
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. 
+ Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the greatest common divisor (gcd) of two big integers. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of big integers. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of big integers. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two big integers. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of big integers. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of big integers. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend using them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occurred calling a native provider function. + + + + + An error occurred calling a native provider function. + + + + + Native provider was unable to allocate sufficient memory. + + + + + Native provider failed LU inversion due to a singular U matrix. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return > 0) + and then dividing the total by the number of gain periods.
+ + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. (The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). + + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. 
+ The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. 
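The root-finding and least-squares entries above are among the most commonly used pieces of this file; the following illustrative sketch (not part of the patch; assumes MathNet.Numerics 3.16.0, where Fit.Line returns a Tuple of intercept and slope) shows the typical call pattern:

```csharp
// Illustrative sketch (not part of the patch): FindRoots and Fit as documented above.
using System;
using MathNet.Numerics;

class RootsAndFitSketch
{
    static void Main()
    {
        // Bracketed root of f(x) = x^3 - 2x - 5 in [2, 3] (root ≈ 2.0946).
        double root = FindRoots.OfFunction(x => x * x * x - 2 * x - 5, 2.0, 3.0);
        Console.WriteLine(root);

        // Least-squares line y ≈ a + b·x through three points: (intercept a, slope b).
        double[] xs = { 1.0, 2.0, 3.0 };
        double[] ys = { 2.1, 3.9, 6.2 };
        Tuple<double, double> line = Fit.Line(xs, ys);
        Console.WriteLine($"a = {line.Item1}, b = {line.Item2}");

        // Order-2 polynomial fit: coefficients [p0, p1, p2], compatible with Evaluate.Polynomial.
        double[] p = Fit.Polynomial(xs, ys, 2);
        Console.WriteLine(Evaluate.Polynomial(2.5, p));
    }
}
```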
+ + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). 
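The MATLAB-style generators documented above (linspace/logspace/colon equivalents, with the length as first argument) can be exercised as follows; illustrative only, not part of the patch:

```csharp
// Illustrative sketch (not part of the patch): the MATLAB-like range generators.
using System;
using MathNet.Numerics;

class GenerateRangesSketch
{
    static void Main()
    {
        double[] lin = Generate.LinearSpaced(5, 0.0, 1.0);     // linspace: 0, 0.25, 0.5, 0.75, 1
        double[] log = Generate.LogSpaced(4, 0.0, 3.0);        // logspace: 1, 10, 100, 1000
        double[] range = Generate.LinearRange(0.0, 0.5, 2.0);  // colon operator: 0, 0.5, ..., 2

        Console.WriteLine(string.Join(", ", lin));
        Console.WriteLine(string.Join(", ", log));
        Console.WriteLine(string.Join(", ", range));
    }
}
```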
+ + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. 
+ Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. + + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Generate a Fibonacci sequence, including zero as first value. + + + + + Generate an infinite Fibonacci sequence, including zero as first value. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. 
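As a rough illustration of the waveform, step and impulse generators summarized above, the following sketch assumes Generate.Sinusoidal, Generate.Step and Generate.Impulse overloads with the (length, amplitude/rate, delay) parameter order described in these comments; treat it as an assumption-laden example rather than a reference usage.

using System;
using MathNet.Numerics;

class SignalSketch
{
    static void Main()
    {
        // 50 Hz sine sampled at 1 kHz with amplitude 1.0 (well above the Nyquist limit).
        double[] sine = Generate.Sinusoidal(64, 1000.0, 50.0, 1.0);

        // Heaviside step of height 2.0, delayed by 8 samples (assumed overload).
        double[] step = Generate.Step(64, 2.0, 8);

        // Kronecker delta of height 1.0 at sample index 3 (assumed overload).
        double[] impulse = Generate.Impulse(64, 1.0, 3);

        Console.WriteLine("{0:F3} {1:F1} {2:F1}", sine[5], step[10], impulse[3]);
    }
}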
+ + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. + + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + The parsed double number using the current culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + The parsed float number using the current culture information. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. 
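The coefficient-of-determination helper described just above can be exercised as follows; this sketch assumes the GoodnessOfFit.RSquared overload taking modelled and observed value sequences.

using System;
using MathNet.Numerics;

class GoodnessOfFitSketch
{
    static void Main()
    {
        double[] observed = { 1.0, 2.1, 2.9, 4.2, 5.1 };
        double[] modelled = { 1.0, 2.0, 3.0, 4.0, 5.0 };   // predictions of a simple model

        // Squared Pearson product-moment correlation between model and data.
        double r2 = GoodnessOfFit.RSquared(modelled, observed);
        Console.WriteLine(r2);
    }
}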
+ + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. 
+ + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. 
+ Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). [= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. 
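A minimal round trip through the in-place FFT described above, assuming the Fourier.Forward and Fourier.Inverse overloads on Complex[] with the default convention options; an illustrative sketch, not a definitive usage.

using System;
using System.Numerics;
using MathNet.Numerics.IntegralTransforms;

class FourierSketch
{
    static void Main()
    {
        // Arbitrary (non power-of-two) length: per the notes above, the Bluestein
        // algorithm handles such sizes.
        var samples = new Complex[12];
        for (int i = 0; i < samples.Length; i++)
        {
            samples[i] = new Complex(Math.Sin(2 * Math.PI * i / samples.Length), 0.0);
        }

        Fourier.Forward(samples);   // time domain -> frequency domain, in place
        Fourier.Inverse(samples);   // frequency domain -> time domain, in place

        // Up to rounding error this recovers the original signal.
        Console.WriteLine(samples[3].Real);
    }
}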
+ + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Approximation of the finite integral in the given interval. + + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. + + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. 
+ + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. 
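For the quadrature helpers summarized above, a short sketch assuming the Integrate.OnClosedInterval overload with a target accuracy and the static GaussLegendreRule.Integrate overload described here:

using System;
using MathNet.Numerics;
using MathNet.Numerics.Integration;

class QuadratureSketch
{
    static void Main()
    {
        Func<double, double> f = x => Math.Exp(-x * x);

        // Adaptive double-exponential transformation on [0, 1] with a target accuracy.
        double a = Integrate.OnClosedInterval(f, 0.0, 1.0, 1e-8);

        // Fixed 32nd-order Gauss-Legendre rule on the same interval;
        // abscissas/weights for order 32 are among the precomputed set.
        double b = GaussLegendreRule.Integrate(f, 0.0, 1.0, 32);

        Console.WriteLine("{0:F10} {1:F10}", a, b);
    }
}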
+ + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. + First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. 
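Before the interpolation factory entries below, the composite Newton-Cotes rules summarized earlier in this block can be used as in the following sketch, which assumes the IntegrateComposite overloads of NewtonCotesTrapeziumRule and SimpsonRule:

using System;
using MathNet.Numerics.Integration;

class NewtonCotesSketch
{
    static void Main()
    {
        Func<double, double> f = Math.Sin;

        // Composite trapezium rule with 1000 subdivision partitions on [0, Pi].
        double t = NewtonCotesTrapeziumRule.IntegrateComposite(f, 0.0, Math.PI, 1000);

        // Composite Simpson's rule; the partition count must be even.
        double s = SimpsonRule.IntegrateComposite(f, 0.0, Math.PI, 1000);

        Console.WriteLine("{0:F6} {1:F6}", t, s);   // both close to 2.0
    }
}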
+ + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. + Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. 
+ + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. + + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). 
+ + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. 
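The spline interpolations described in this block expose the same Interpolate/Differentiate/Integrate surface; a sketch using the CubicSpline.InterpolateNaturalSorted factory named above, assuming sample points already sorted ascendingly by x:

using System;
using MathNet.Numerics.Interpolation;

class SplineSketch
{
    static void Main()
    {
        double[] x = { 0.0, 1.0, 2.0, 3.0, 4.0 };
        double[] y = { 0.0, 1.0, 4.0, 9.0, 16.0 };   // samples of t^2

        // Natural cubic spline: zero second derivatives at both boundaries.
        CubicSpline spline = CubicSpline.InterpolateNaturalSorted(x, y);

        Console.WriteLine(spline.Interpolate(2.5));    // interpolated value x(t)
        Console.WriteLine(spline.Differentiate(2.5));  // interpolated first derivative
        Console.WriteLine(spline.Integrate(0.0, 4.0)); // definite integral over [0, 4]
    }
}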
+ + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. + Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. 
+ + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. 
+ + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Left and right boundary conditions. + + + + + Natural Boundary (Zero second derivative). + + + + + Parabolically Terminated boundary. + + + + + Fixed first derivative at the boundary. + + + + + Fixed second derivative at the boundary. + + + + + A step function where the start of each segment is included, and the last segment is open-ended. + Segment i is [x_i, x_i+1) for i < N, or [x_i, infinity] for i = N. + The domain of the function is all real numbers, such that y = 0 where x <. + + Supports both differentiation and integration. + + + Sample points (N), sorted ascending + Samples values (N) of each segment starting at the corresponding sample point. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. 
+ WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t. + + + + + Wraps an interpolation with a transformation of the interpolated values. + + Neither differentiation nor integration is supported. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
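A few of the dense matrix factory methods described above, sketched under the assumption of the DenseMatrix.OfArray and DenseMatrix.Create overloads in MathNet.Numerics.LinearAlgebra.Double:

using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixSketch
{
    static void Main()
    {
        // Independent copy of a two-dimensional array.
        var a = DenseMatrix.OfArray(new double[,]
        {
            { 1.0, 2.0 },
            { 3.0, 4.0 }
        });

        // 3x3 matrix filled from a (row, column) init function: here, the identity.
        var eye = DenseMatrix.Create(3, 3, (i, j) => i == j ? 1.0 : 0.0);

        Console.WriteLine(a.ToString());
        Console.WriteLine(eye.ToString());
    }
}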
+ + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. 
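The norm and arithmetic members summarized above are available on any Matrix&lt;double&gt;; a brief sketch, assuming the standard operators and pointwise methods:

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class MatrixArithmeticSketch
{
    static void Main()
    {
        Matrix<double> m = DenseMatrix.OfArray(new double[,] { { 1.0, -2.0 }, { 3.0, 4.0 } });
        Vector<double> v = DenseVector.OfArray(new[] { 1.0, 1.0 });

        Console.WriteLine(m.L1Norm());              // maximum absolute column sum
        Console.WriteLine(m.InfinityNorm());        // maximum absolute row sum
        Console.WriteLine(m.FrobeniusNorm());       // entry-wise Frobenius norm
        Console.WriteLine(m * v);                   // matrix-vector product
        Console.WriteLine(m.PointwiseMultiply(m));  // entry-wise product
        Console.WriteLine(m.Trace());               // sum of diagonal entries
    }
}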
+ + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. 
+ The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. 
+ + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use, + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. 
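A corresponding sketch for the dense-vector members documented above (creation, dot product, norms and index queries), again assuming the MathNet.Numerics 3.x builder API rather than anything contained in this diff:

    using MathNet.Numerics.LinearAlgebra;

    class DenseVectorSketch
    {
        static void Main()
        {
            var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 }); // binds directly to the array
            var v = Vector<double>.Build.Dense(3, i => i + 1.0);         // init function

            System.Console.WriteLine(u.DotProduct(v));   // sum of u[i] * v[i]
            System.Console.WriteLine(u.L2Norm());        // Euclidean norm
            System.Console.WriteLine(u.MaximumIndex());  // index of the maximum element
        }
    }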
+ + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a double dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. 
+ The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. 
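For the diagonal matrix type described above, a short sketch assuming the MathNet.Numerics 3.x builder names (DenseDiagonal, DiagonalOfDiagonalArray), not taken from this diff:

    using MathNet.Numerics.LinearAlgebra;

    class DiagonalMatrixSketch
    {
        static void Main()
        {
            // Diagonal storage built from the diagonal elements only.
            var d = Matrix<double>.Build.DiagonalOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });

            // Dense matrix with a constant value on the diagonal, zero elsewhere.
            var k = Matrix<double>.Build.DenseDiagonal(3, 3, 2.0);

            Vector<double> diag = d.Diagonal();          // the Min(Rows, Columns) diagonal entries
            System.Console.WriteLine(d.Determinant());

            // As noted above, assigning a non-zero value off the diagonal of a
            // diagonal matrix throws; writing 0.0 there causes no change.
            // d[0, 1] = 5.0;  // would throw
        }
    }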
+ + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. 
+ + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + Matrix V is encoded in the property EigenVectors in the way that: + - column corresponding to real eigenvalue represents real eigenvector, + - columns corresponding to the pair of complex conjugate eigenvalues + lambda[i] and lambda[i+1] encode real and imaginary parts of eigenvectors. + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. 
+ + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. 
+ Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. 
+ + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. 
Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + double version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. 
+ + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. 
+ Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
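The remarks above refer to example code that is not reproduced in this dump. As a stand-in, here is a minimal usage sketch, assuming the MathNet.Numerics 3.x names (BiCgStab, Iterator, the stop criteria, and the diagonal preconditioner documented further down in this file):

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;   // BiCgStab, DiagonalPreconditioner (assumed names)
    using MathNet.Numerics.LinearAlgebra.Solvers;          // Iterator and stop criteria (assumed names)

    class BiCgStabSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1, 0 },
                { 1, 3, 1 },
                { 0, 1, 2 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(b.Count);

            // Stop after at most 1000 iterations or once the residual drops below 1e-10.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            // Solve(matrix, input, result, iterator, preconditioner), matching the members documented below.
            new BiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        }
    }

As the remarks stress, the choice of preconditioner largely determines how well the solver behaves; the diagonal preconditioner is only the simplest of the options documented in this file.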
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
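The GPBiCG remarks above likewise refer to example code that is not reproduced here. A stand-in sketch under the same MathNet.Numerics 3.x naming assumptions (GpBiCg, and ILU0Preconditioner for the ILU(0) preconditioner documented further down):

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;   // GpBiCg, ILU0Preconditioner (assumed names)
    using MathNet.Numerics.LinearAlgebra.Solvers;          // Iterator and stop criteria (assumed names)

    class GpBiCgSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 5, 1, 0 },
                { 1, 4, 2 },
                { 0, 2, 3 }
            });
            var b = Vector<double>.Build.Dense(new[] { 6.0, 7.0, 5.0 });
            var x = Vector<double>.Build.Dense(b.Count);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            new GpBiCg().Solve(a, b, x, iterator, new ILU0Preconditioner());
        }
    }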
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
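A brief sketch of how an ILU(0) preconditioner is typically used on its own, via the Initialize/Approximate pair documented below; it can equally be passed to a solver's Solve call. The class name `ILU0Preconditioner` is an assumption following MathNet.Numerics 3.x naming.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

var A = SparseMatrix.OfArray(new double[,]
{
    {  4.0, -1.0,  0.0 },
    { -1.0,  4.0, -1.0 },
    {  0.0, -1.0,  4.0 },
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 0.0, 1.0 });

// Standalone use: factorize once, then approximate the preconditioned solve.
var ilu0 = new ILU0Preconditioner();
ilu0.Initialize(A);                          // builds the combined L/U factors
var approx = Vector<double>.Build.Dense(b.Count);
ilu0.Approximate(b, approx);                 // approx now holds an approximation to A⁻¹·b
```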
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
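The sketch below constructs this preconditioner with an explicit fill level, drop tolerance and pivot tolerance (the three settings documented below) and passes it to an iterative solver. The class names `ILUTPPreconditioner` and `BiCgStab` follow MathNet.Numerics 3.x naming and are assumptions here.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

var A = SparseMatrix.OfArray(new double[,]
{
    { 10.0, -1.0,  2.0,  0.0 },
    { -1.0, 11.0, -1.0,  3.0 },
    {  2.0, -1.0, 10.0, -1.0 },
    {  0.0,  3.0, -1.0,  8.0 },
});
var b = Vector<double>.Build.Dense(new[] { 6.0, 25.0, -11.0, 15.0 });
var x = Vector<double>.Build.Dense(b.Count);

// Arguments: fill level, drop tolerance, pivot tolerance (see the property descriptions below).
var ilutp = new ILUTPPreconditioner(200.0, 1e-4, 0.5);

new BiCgStab().Solve(A, b, x,
    new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10)),
    ilutp);
```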
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
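As with the other solvers, the example referenced above did not survive into this file. The sketch below drives the assumed MathNet.Numerics 3.x class `MlkBiCgStab` through the `Solve` overload documented in this file.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

var A = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 5.0, 1.0, 0.0 },
    { 1.0, 4.0, 2.0 },
    { 0.0, 2.0, 6.0 },
});
var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
var x = Vector<double>.Build.Dense(b.Count);

// The number of Lanczos starting vectors can be tuned via the property documented below;
// it must stay above 1 and below the number of unknowns.
var solver = new MlkBiCgStab();

solver.Solve(A, b, x,
    new Iterator<double>(
        new IterationCountStopCriterion<double>(500),
        new ResidualStopCriterion<double>(1e-8)),
    new DiagonalPreconditioner());
```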
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
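The TFQMR example referenced above is likewise missing from this file; here is a minimal sketch under the same assumptions about MathNet.Numerics 3.x class names (`TFQMR`, `DiagonalPreconditioner`), using the `Solve` overload documented below.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

var A = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 3.0, 1.0 },
    { 2.0, 4.0 },
});
var b = Vector<double>.Build.Dense(new[] { 5.0, 6.0 });
var x = Vector<double>.Build.Dense(b.Count);

new TFQMR().Solve(A, b, x,
    new Iterator<double>(
        new IterationCountStopCriterion<double>(200),
        new ResidualStopCriterion<double>(1e-10)),
    new DiagonalPreconditioner());

// x now approximates the solution of Ax = b.
```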
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + double version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. 
+ All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
+ + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a float dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. 
+ + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. 
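A short sketch of the diagonal-matrix behaviour described above, assuming the DiagonalMatrix type in MathNet.Numerics.LinearAlgebra.Double and its raw-array constructor:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DiagonalMatrixSketch
    {
        static void Run()
        {
            // Bind directly to a raw diagonal array (no copy is taken).
            var d = new DiagonalMatrix(3, 3, new[] { 2.0, 4.0, 8.0 });

            d[1, 1] = 5.0;  // on the diagonal: allowed
            d[0, 1] = 0.0;  // off the diagonal but zero: causes no change
            // d[0, 1] = 1.0 would throw, since non-zero off-diagonal entries are rejected.

            var x = Vector<double>.Build.Dense(3, 1.0);
            var y = d * x;  // scales each component by the matching diagonal entry
        }
    }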
+ + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. 
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. 
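A minimal sketch of the Cholesky usage implied above, assuming the Cholesky() factorization method on Matrix&lt;double&gt; and its Solve and Determinant members:

    using MathNet.Numerics.LinearAlgebra;

    static class CholeskySketch
    {
        static void Run()
        {
            // A must be symmetric positive definite, otherwise the factorization throws.
            var A = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 1.0 },
                { 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

            var chol = A.Cholesky();        // A = L * L'
            var x = chol.Solve(b);          // solves A * x = b
            double det = chol.Determinant;  // determinant of A, via the factor
        }
    }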
+ + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. 
+ + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. 
+ If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. 
+ + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
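A sketch of solving a square system with the two QR variants documented here, assuming the QR() and GramSchmidt() factorization methods exposed on Matrix&lt;double&gt;:

    using MathNet.Numerics.LinearAlgebra;

    static class QrSketch
    {
        static void Run()
        {
            var A = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 12.0, -51.0,   4.0 },
                {  6.0, 167.0, -68.0 },
                { -4.0,  24.0, -41.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

            var qr = A.QR();            // Householder-based QR, A = Q * R
            var x1 = qr.Solve(b);       // solves A * x = b

            var mgs = A.GramSchmidt();  // modified Gram-Schmidt variant
            var x2 = mgs.Solve(b);
        }
    }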
+ + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. 
+ The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. 
+ The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the proper preconditioner.
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks for iterative methods
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, Charles Romine and Henk van der Vorst
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the solver.
+
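A hedged sketch of how the solver described above might be driven, assuming the BiCgStab, DiagonalPreconditioner, Iterator and stop-criterion types in the MathNet.Numerics.LinearAlgebra.Double.Solvers and MathNet.Numerics.LinearAlgebra.Solvers namespaces:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class BiCgStabSketch
    {
        static void Run()
        {
            var A = Matrix<double>.Build.SparseOfArray(new[,]
            {
                {  4.0, -1.0,  0.0 },
                { -1.0,  4.0, -1.0 },
                {  0.0, -1.0,  4.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(b.Count);

            // Stop after 1000 iterations or once the residual drops below 1e-10.
            var monitor = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            var solver = new BiCgStab();
            solver.Solve(A, b, x, monitor, new DiagonalPreconditioner());
        }
    }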
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes and B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the proper preconditioner.
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with efficiency and robustness
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the solver.
+
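The calling pattern matches the BiCgStab sketch earlier; only the solver object changes. The GpBiCg class name is an assumption:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class GpBiCgSketch
    {
        static void Run(Matrix<double> A, Vector<double> b)
        {
            var x = Vector<double>.Build.Dense(b.Count);
            var monitor = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            // Switching between the BiCGStab and GPBiCG phases is controlled by the
            // step-count properties documented in the entries that follow.
            var solver = new GpBiCg();
            solver.Solve(A, b, x, monitor, new DiagonalPreconditioner());
        }
    }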
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
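A sketch of plugging the ILU(0) preconditioner into an iterative solve. The ILU0Preconditioner class name is an assumption; the solver initializes the preconditioner with the coefficient matrix before iterating:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class Ilu0Sketch
    {
        static void Run(Matrix<double> A, Vector<double> b)
        {
            var x = Vector<double>.Build.Dense(b.Count);
            var monitor = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8));

            // The combined L/U factors are built from A when the preconditioner is initialized.
            new BiCgStab().Solve(A, b, x, monitor, new ILU0Preconditioner());
        }
    }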
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
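A sketch of tuning the ILUTP preconditioner. The ILUTPPreconditioner class name is an assumption; the three-argument constructor is taken to accept the fill level, drop tolerance and pivot tolerance in the order described in the entries that follow:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class IlutpSketch
    {
        static void Run(Matrix<double> A, Vector<double> b)
        {
            // (fill level, drop tolerance, pivot tolerance): more fill keeps the factors
            // denser, a larger drop tolerance discards more small entries, and a non-zero
            // pivot tolerance enables partial pivoting.
            var preconditioner = new ILUTPPreconditioner(10.0, 1e-4, 0.5);

            var x = Vector<double>.Build.Dense(b.Count);
            var monitor = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8));

            new BiCgStab().Solve(A, b, x, monitor, preconditioner);
        }
    }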
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the solver.
+
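As with the other Krylov solvers above, only the solver object changes; MlkBiCgStab is the assumed class name:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class MlkBiCgStabSketch
    {
        static void Run(Matrix<double> A, Vector<double> b, Iterator<double> monitor)
        {
            var x = Vector<double>.Build.Dense(b.Count);

            // The number of Lanczos starting vectors is configurable through the property
            // documented below; the default is used here.
            new MlkBiCgStab().Solve(A, b, x, monitor, new DiagonalPreconditioner());
        }
    }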
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative Methods for Sparse Linear Systems. +
+ Yousef Saad +
+ The algorithm is described in Chapter 7, Section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
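As with the ML(k)-BiCGStab sketch above, the TFQMR example markup is not visible here. Continuing from that sketch (same A, b, x), the solver is driven through the same documented Solve signature, and the "true residual" mentioned below is simply b - A*x; the TFQMR class name is again an assumption from MathNet.Numerics 3.x.

    // Continues the set-up from the previous sketch.
    var iterator2 = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10));

    var tfqmr = new TFQMR();
    tfqmr.Solve(A, b, x, iterator2, new MILU0Preconditioner());

    var residual = b - A * x;                     // true residual as documented below
    System.Console.WriteLine(residual.L2Norm());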
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
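A short illustration of the sparse (CSR) matrix type documented above, built from an indexed enumerable; the factory name SparseMatrix.OfIndexed and its Tuple-based signature are assumptions based on MathNet.Numerics 3.x.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    var entries = new[]
    {
        Tuple.Create(0, 0,  2.0),
        Tuple.Create(1, 1,  3.0),
        Tuple.Create(2, 0, -1.0)
    };
    // Keys may appear at most once; omitted (row, column) pairs stay zero.
    var s = SparseMatrix.OfIndexed(3, 3, entries);

    Console.WriteLine(s.NonZerosCount);     // 3 non-zero elements stored
    Console.WriteLine(s.FrobeniusNorm());   // sqrt(4 + 9 + 1) ≈ 3.742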
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
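A small sketch of the sparse vector type and the norm/index members documented above; the SparseVector(int) constructor and the exact member spellings are assumed from MathNet.Numerics 3.x.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    var v = new SparseVector(10);   // all ten entries start at zero
    v[2] = 1.5;
    v[7] = -3.0;

    Console.WriteLine(v.NonZerosCount);           // 2
    Console.WriteLine(v.L1Norm());                // 4.5 (Manhattan norm)
    Console.WriteLine(v.InfinityNorm());          // 3   (maximum absolute value)
    Console.WriteLine(v.AbsoluteMaximumIndex());  // 7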
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
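The dense matrix documented above stores its data in a single column-major array, and one constructor binds directly to a caller-supplied array without copying. A brief sketch, with type and member names assumed from MathNet.Numerics 3.x:

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    var m = DenseMatrix.OfArray(new double[,]
    {
        { 1.0, 2.0 },
        { 3.0, 4.0 }
    });

    // Same matrix bound directly to a column-major array: no copy is made, so
    // changes to `storage` are visible through `bound` and vice versa.
    var storage = new[] { 1.0, 3.0, 2.0, 4.0 };
    var bound = new DenseMatrix(2, 2, storage);

    Console.WriteLine(m.Equals(bound));   // True
    storage[0] = 10.0;
    Console.WriteLine(bound[0, 0]);       // 10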
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. 
+ The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. 
+ + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
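The dense vector likewise offers a constructor that binds directly to a raw array, and DotProduct implements the sum a[i]*b[i] described above. A minimal sketch (names assumed from MathNet.Numerics 3.x):

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

    double[] data = { 4.0, 5.0, 6.0 };
    var w = new DenseVector(data);          // binds directly to `data`, no copy

    Console.WriteLine(u.DotProduct(w));     // 1*4 + 2*5 + 3*6 = 32
    data[0] = 0.0;                          // the change is visible through w
    Console.WriteLine(u.DotProduct(w));     // 0 + 10 + 18 = 28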
+ + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. 
The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. 
+ + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the Frobenius norm of this matrix. + The Frobenius norm of this matrix. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. 
+ + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. 
+ If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. 
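    The factorization classes documented above (Cholesky, EVD, QR, LU) all follow the same pattern: the factorization is computed once, in the constructor, and then reused through the Solve overloads for AX = B and Ax = b. A minimal sketch of that pattern, assuming the Matrix<double>.Cholesky() and QR() factory methods of the MathNet.Numerics 3.x package referenced by this solution (the example is illustrative, not part of the library's own documentation):

    using MathNet.Numerics.LinearAlgebra;

    class FactorizationSolveExample
    {
        static void Main()
        {
            // Symmetric positive definite matrix, so Cholesky is applicable.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 1.0, 3.0, 1.0 },
                { 0.0, 1.0, 2.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

            // The factorization is computed here and can be reused for
            // several right hand sides without refactoring the matrix.
            var cholesky = a.Cholesky();
            Vector<double> x = cholesky.Solve(b);

            // QR handles general (possibly non-symmetric) square systems.
            Vector<double> xQr = a.QR().Solve(b);

            System.Console.WriteLine(x);
            System.Console.WriteLine(xQr);
        }
    }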
+ + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. 
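    The LU section above notes that pivots are stored so that P*A = L*U, and that the factorization exposes the determinant, the inverse and Solve. A short sketch of those members, assuming the Matrix<double>.LU() factory of MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;

    class LuExample
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 2.0, 1.0 },
                { 4.0, 3.0 }
            });

            // Factorization (with partial pivoting) happens once, here.
            var lu = a.LU();

            double det = lu.Determinant;           // 2*3 - 1*4 = 2
            Matrix<double> inverse = lu.Inverse(); // computed via the LU factors
            Vector<double> x = lu.Solve(Vector<double>.Build.DenseOfArray(new[] { 1.0, 1.0 }));

            System.Console.WriteLine(det);
            System.Console.WriteLine(inverse * a); // approximately the identity
            System.Console.WriteLine(x);
        }
    }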
+ + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. 
The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. 
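    The SVD description above (M = UΣVT with the singular values ordered descending) translates into a small amount of code. A hedged sketch, assuming the Matrix<double>.Svd() factory and the S, Rank and Solve members of the 3.x Svd<double> type; for a rectangular system, Solve returns the least-squares solution:

    using MathNet.Numerics.LinearAlgebra;

    class SvdExample
    {
        static void Main()
        {
            // 3x2 (overdetermined) system.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 3.0, 2.0 },
                { 2.0, 3.0 },
                { 0.0, 1.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

            var svd = a.Svd();
            Vector<double> singularValues = svd.S; // largest to smallest
            int rank = svd.Rank;                   // number of non-negligible singular values

            // Least-squares solution of the overdetermined system.
            Vector<double> x = svd.Solve(b);

            System.Console.WriteLine(singularValues);
            System.Console.WriteLine(rank);
            System.Console.WriteLine(x);
        }
    }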
+ + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex value z1 + Complex value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. 
+ + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
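    The complex matrix members above distinguish the ordinary matrix product from the pointwise (Hadamard) operations, and add complex-specific members such as the conjugate transpose and the Hermitian test. A brief sketch of those distinctions, assuming the generic Matrix<Complex> builder of MathNet.Numerics 3.x:

    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    class ComplexMatrixOpsExample
    {
        static void Main()
        {
            var a = Matrix<Complex>.Build.DenseOfArray(new Complex[,]
            {
                { new Complex(1, 1), new Complex(0, 2) },
                { new Complex(3, 0), new Complex(1, -1) }
            });

            // Conjugate (Hermitian) transpose of the matrix.
            Matrix<Complex> aH = a.ConjugateTranspose();

            // Elementwise product versus the ordinary matrix product.
            Matrix<Complex> hadamard = a.PointwiseMultiply(a);
            Matrix<Complex> product = a * aH;

            // A * A^H is Hermitian by construction, so this prints True.
            System.Console.WriteLine(product.IsHermitian());
            System.Console.WriteLine(hadamard);
        }
    }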
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
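    A hedged sketch of the kind of use the sentence above refers to. The Solve(matrix, input, result, iterator, preconditioner) signature follows the member documentation below; the concrete type names (BiCgStab, Iterator<double>, IterationCountStopCriterion<double>, ResidualStopCriterion<double>, DiagonalPreconditioner) are assumed from the MathNet.Numerics 3.x namespaces and are not spelled out in this documentation text:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class BiCgStabExample
    {
        static void Main()
        {
            // A small non-symmetric system; BiCGStab does not require symmetry.
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3);

            // Stop after 1000 iterations or once the residual is small enough.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            var solver = new BiCgStab();
            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

            System.Console.WriteLine(x);
        }
    }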
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
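    A hedged sketch of a possible use of this solver, as the sentence above suggests. The two step-count properties correspond to the "number of steps taken with the BiCgStab/GPBiCG algorithm before switching" members documented below; their exact names (NumberOfBiCgStabSteps, NumberOfGpBiCgSteps), like the class name GpBiCg, are assumptions based on the MathNet.Numerics 3.x API:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class GpBiCgExample
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 5.0, 2.0, 0.0 },
                { 1.0, 4.0, 1.0 },
                { 0.0, 2.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 0.0, 1.0 });
            var x = Vector<double>.Build.Dense(3);

            // The solver alternates between BiCGStab and GPBiCG steps; these
            // two counts control when it switches between the methods.
            var solver = new GpBiCg
            {
                NumberOfBiCgStabSteps = 2,
                NumberOfGpBiCgSteps = 4
            };

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
            System.Console.WriteLine(x);
        }
    }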
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
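    The ILU(0) preconditioner described below is not used on its own; it is handed to an iterative solver together with the coefficient matrix. A hedged sketch of that pairing, assuming the ILU0Preconditioner and SparseMatrix.OfIndexed names from the MathNet.Numerics 3.x double-precision namespaces:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class Ilu0Example
    {
        static void Main()
        {
            // Sparse coefficient matrix assembled from (row, column, value) triplets.
            var a = SparseMatrix.OfIndexed(3, 3, new[]
            {
                System.Tuple.Create(0, 0, 4.0),
                System.Tuple.Create(0, 1, 1.0),
                System.Tuple.Create(1, 0, 1.0),
                System.Tuple.Create(1, 1, 3.0),
                System.Tuple.Create(1, 2, 1.0),
                System.Tuple.Create(2, 1, 1.0),
                System.Tuple.Create(2, 2, 2.0)
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3);

            // ILU(0) keeps only the sparsity pattern of A in its L and U factors.
            var preconditioner = new ILU0Preconditioner();

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            new BiCgStab().Solve(a, b, x, iterator, preconditioner);
            System.Console.WriteLine(x);
        }
    }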
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
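    The settings documented below (fill level, drop tolerance, pivot tolerance) are exactly the three values passed to the preconditioner's parameterized constructor. A sketch under the assumption that the class is named ILUTPPreconditioner in the 3.x double-precision solver namespace and that the constructor takes the three settings in the order in which they are documented:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class IlutpExample
    {
        static void Main()
        {
            var a = SparseMatrix.OfIndexed(3, 3, new[]
            {
                System.Tuple.Create(0, 0, 4.0),
                System.Tuple.Create(0, 2, 1.0),
                System.Tuple.Create(1, 1, 3.0),
                System.Tuple.Create(2, 0, 1.0),
                System.Tuple.Create(2, 2, 2.0)
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3);

            // Arguments: fill level, drop tolerance, pivot tolerance.
            // More fill keeps more entries, a larger drop tolerance discards more
            // small entries, and a non-zero pivot tolerance enables partial pivoting.
            var preconditioner = new ILUTPPreconditioner(10.0, 1e-4, 0.5);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            new BiCgStab().Solve(a, b, x, iterator, preconditioner);
            System.Console.WriteLine(x);
        }
    }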
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
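    A hedged sketch of the use suggested above. The property for the number of Lanczos starting vectors is documented below as needing to be larger than 1 and smaller than the number of variables; its exact name (NumberOfStartingVectors) and the class name MlkBiCgStab are assumed from the MathNet.Numerics 3.x API:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class MlkBiCgStabExample
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 6.0, 1.0, 0.0, 0.0 },
                { 2.0, 5.0, 1.0, 0.0 },
                { 0.0, 1.0, 4.0, 1.0 },
                { 0.0, 0.0, 2.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0, 4.0 });
            var x = Vector<double>.Build.Dense(4);

            // Two Lanczos starting vectors: above 1 and below the 4 variables.
            var solver = new MlkBiCgStab { NumberOfStartingVectors = 2 };

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
            System.Console.WriteLine(x);
        }
    }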
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
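    A hedged sketch of the use suggested above, again assuming the 3.x type names (TFQMR, Iterator<double>, the stop criteria and DiagonalPreconditioner); the final line checks the true residual b - Ax in the same sense as the CalculateTrueResidual member documented below:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class TfqmrExample
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 1.0, 4.0, 1.0 },
                { 0.0, 1.0, 4.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var x = Vector<double>.Build.Dense(3);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-12));

            new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());

            // True residual: residual = b - A*x.
            System.Console.WriteLine((b - a * x).L2Norm());
        }
    }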
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
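    The SparseMatrix type documented above stores only its non-zero cells in CSR form, so the natural way to build one is from (row, column, value) triplets. A small sketch of that construction and of a few of the members documented in this section, assuming the SparseMatrix.OfIndexed creator of MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class SparseMatrixExample
    {
        static void Main()
        {
            // Cells that are omitted are zero and are not stored.
            var a = SparseMatrix.OfIndexed(4, 4, new[]
            {
                System.Tuple.Create(0, 0, 2.0),
                System.Tuple.Create(1, 1, 3.0),
                System.Tuple.Create(2, 2, 4.0),
                System.Tuple.Create(3, 0, 1.0),
                System.Tuple.Create(3, 3, 5.0)
            });

            System.Console.WriteLine(a.NonZerosCount);     // 5 stored entries

            var v = Vector<double>.Build.DenseOfArray(new[] { 1.0, 1.0, 1.0, 1.0 });
            Vector<double> product = a * v;                // sparse matrix-vector product

            // The strict triangle drops the diagonal, as documented above.
            Matrix<double> strictlyLower = a.StrictlyLowerTriangle();

            System.Console.WriteLine(product);
            System.Console.WriteLine(strictlyLower);
        }
    }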
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
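Editor's note: the block above documents sparse vector construction and its reduction operations (sum, L1 norm, dot product). A short sketch under the assumption that the Vector<double>.Build factory and its SparseOfIndexed method behave as in Math.NET Numerics 3.x; any name not literally appearing above is an assumption:

using System;
using MathNet.Numerics.LinearAlgebra;

static class SparseVectorDemo
{
    static void Run()
    {
        // Only the listed indices are non-zero; omitted indices default to zero.
        Vector<double> s = Vector<double>.Build.SparseOfIndexed(1000, new[]
        {
            Tuple.Create(3, 2.5),
            Tuple.Create(250, -1.0),
            Tuple.Create(999, 4.0)
        });

        double sum = s.Sum();             // 5.5
        double manhattan = s.L1Norm();    // 7.5 (sum of absolute values)
        double selfDot = s.DotProduct(s); // sum of s[i]*s[i]

        // Per the warning in the docs above, adding a non-zero scalar densifies the result,
        // so prefer a dense vector when that is the operation you need.
        Vector<double> densified = s.Add(2.0);
    }
}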
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
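Editor's note: several members above exist specifically for the complex element types (Conjugate, ConjugateDotProduct). A hedged sketch of how the plain and conjugated dot products differ, assuming Vector<Complex> from the same library; the demo values are illustrative only:

using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class ComplexVectorDemo
{
    static void Run()
    {
        var v = Vector<Complex>.Build.DenseOfArray(new[]
        {
            new Complex(1, 2),
            new Complex(0, -1)
        });

        // DotProduct sums a[i]*b[i]; ConjugateDotProduct sums conj(a[i])*b[i],
        // so the conjugated form of v with itself yields |v|^2 as a real number.
        Complex plain = v.DotProduct(v);                // (1+2i)^2 + (-i)^2 = -4 + 4i
        Complex hermitianSquare = v.ConjugateDotProduct(v); // |1+2i|^2 + |-i|^2 = 6

        Vector<Complex> conj = v.Conjugate();           // element-wise complex conjugate
    }
}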
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. 
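Editor's note: the norm and pointwise members documented above are plain instance methods on the vector types. A small sketch, again assuming the 3.x method names (L1Norm, Norm(p), PointwisePower, Normalize) rather than quoting the diff:

using MathNet.Numerics.LinearAlgebra;

static class VectorNormDemo
{
    static void Run()
    {
        var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0, 0.0 });

        double l1 = v.L1Norm();          // 7  (Manhattan norm)
        double l2 = v.L2Norm();          // 5  (Euclidean norm)
        double linf = v.InfinityNorm();  // 4  (maximum absolute value)
        double l3 = v.Norm(3.0);         // ( |3|^3 + |-4|^3 )^(1/3)

        int maxAbsAt = v.AbsoluteMaximumIndex(); // 1

        // Pointwise power raises every element; Normalize(p) rescales to a unit p-norm vector.
        Vector<double> cubed = v.PointwisePower(3.0);
        Vector<double> unit = v.Normalize(2.0);
    }
}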
+ + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. 
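Editor's note: the dense-matrix constructors listed above are exposed through the Matrix<T>.Build factory in recent Math.NET Numerics versions. A sketch of the common construction routes; the builder method names below (DenseOfRowArrays, DenseOfDiagonalArray, DenseIdentity) are my recollection of that factory and should be treated as assumptions:

using MathNet.Numerics.LinearAlgebra;

static class DenseMatrixBuildDemo
{
    static void Run()
    {
        // Dense storage is column-major; each builder below copies its input.
        var fromRows = Matrix<double>.Build.DenseOfRowArrays(
            new[] { 1.0, 2.0 },
            new[] { 3.0, 4.0 });

        var fromColumns = Matrix<double>.Build.DenseOfColumnArrays(
            new[] { 1.0, 3.0 },
            new[] { 2.0, 4.0 });   // same matrix as fromRows

        var identity = Matrix<double>.Build.DenseIdentity(3);
        var diagonal = Matrix<double>.Build.DenseOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });

        double columnSumNorm = fromRows.L1Norm();    // induced L1 norm: max absolute column sum = 6
        double frobenius = fromRows.FrobeniusNorm(); // sqrt(1 + 4 + 9 + 16)
    }
}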
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. 
+ + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex32 value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex32 value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex32 value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. 
+ + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex32 dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex32 dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. 
+ Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
+ + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. 
+ + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
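+
+ The factorization classes described above (Cholesky, Evd, QR, LU, Svd) all expose a Solve method for AX = B or Ax = b. A minimal C# sketch of that uniform surface, assuming the MathNet.Numerics 3.x extension methods Svd(), QR(), LU() and Cholesky() on Matrix&lt;double&gt; (names are assumptions of this sketch, not taken from the file itself):
+
+     using MathNet.Numerics.LinearAlgebra;
+
+     var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
+     var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });
+
+     // Each factorization is computed at construction time and cached.
+     var svd = A.Svd();                 // singular value decomposition
+     var x1 = svd.Solve(b);             // solve via the cached U, S, VT
+     var x2 = A.QR().Solve(b);          // QR (Householder transformations)
+     var x3 = A.LU().Solve(b);          // LU factorization
+     var x4 = A.Cholesky().Solve(b);    // requires A symmetric positive definite
+
+     // The SVD also exposes the quantities documented above.
+     int rank = svd.Rank;               // effective numerical rank
+     double cond = svd.ConditionNumber; // max(S) / min(S)
+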
+ + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. 
+ If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ URL: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
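+ As a stand-in for the example the remark above refers to, a minimal C# sketch of one possible use of the BiCgStab solver; the type names (Iterator&lt;double&gt;, IterationCountStopCriterion&lt;double&gt;, ResidualStopCriterion&lt;double&gt;, DiagonalPreconditioner) follow the MathNet.Numerics 3.x API and are assumptions of this sketch:
+
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+     // A small non-symmetric, diagonally dominant system A x = b.
+     var A = Matrix<double>.Build.Dense(3, 3, (i, j) => i == j ? 4.0 : (j == i + 1 ? 1.0 : 0.0));
+     var b = Vector<double>.Build.Dense(3, 1.0);
+     var x = Vector<double>.Build.Dense(3);
+
+     // Stop after 1000 iterations or once the residual drops below 1e-10.
+     var iterator = new Iterator<double>(
+         new IterationCountStopCriterion<double>(1000),
+         new ResidualStopCriterion<double>(1e-10));
+
+     new BiCgStab().Solve(A, b, x, iterator, new DiagonalPreconditioner());
+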
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris

+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
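+ A hedged C# sketch of the GpBiCg solver, reusing the A, b, x and iterator set up in the BiCgStab sketch earlier in this file; the property names NumberOfBiCgStabSteps and NumberOfGpBiCgSteps are assumptions based on the descriptions below:
+
+     // Assumed property names; they control when the solver switches between
+     // the BiCGStab and GPBiCG iterations described above.
+     var gpbicg = new GpBiCg { NumberOfBiCgStabSteps = 2, NumberOfGpBiCgSteps = 4 };
+     gpbicg.Solve(A, b, x, iterator, new DiagonalPreconditioner());
+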
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
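+ A brief C# sketch of the ILU(0) preconditioner API described above, again reusing A, b, x and iterator from the BiCgStab sketch; the class name ILU0Preconditioner is an assumption about the MathNet.Numerics 3.x API:
+
+     var ilu0 = new ILU0Preconditioner();
+     ilu0.Initialize(A);          // factors A into the combined L/U storage described above
+     ilu0.Approximate(b, x);      // writes an approximate solution of A x = b into x
+
+     // More commonly the preconditioner is simply handed to an iterative solver,
+     // which initializes and applies it as needed.
+     new BiCgStab().Solve(A, b, x, iterator, ilu0);
+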
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
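+ A hedged C# sketch of configuring this drop-tolerance preconditioner before handing it to a solver (A, b, x and iterator as in the sketches above); the class name ILUTPPreconditioner is an assumption, while FillLevel, DropTolerance and PivotTolerance are the properties documented below:
+
+     var ilutp = new ILUTPPreconditioner
+     {
+         FillLevel = 10.0,        // allowed fill, as a multiple of the original non-zero count
+         DropTolerance = 1e-4,    // drop entries with absolute value below this threshold
+         PivotTolerance = 0.5     // 0.0 disables pivoting entirely
+     };
+     new GpBiCg().Solve(A, b, x, iterator, ilutp);
+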
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
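+ A hedged C# sketch of the ML(k)-BiCGStab solver, reusing A, b, x and iterator from the BiCgStab sketch above; the class name MlkBiCgStab and the property NumberOfStartingVectors are assumptions based on the descriptions below:
+
+     // NumberOfStartingVectors must lie strictly between 1 and the number of variables.
+     var mlk = new MlkBiCgStab { NumberOfStartingVectors = 4 };
+     mlk.Solve(A, b, x, iterator, new DiagonalPreconditioner());
+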
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
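+ A hedged C# sketch of the TFQMR solver (A, b, x and iterator as in the BiCgStab sketch above); the SolveIterative extension method on Matrix&lt;T&gt; is an assumption about the MathNet.Numerics 3.x API:
+
+     var tfqmr = new TFQMR();
+     tfqmr.Solve(A, b, x, iterator, new DiagonalPreconditioner());
+
+     // Alternatively, assuming the SolveIterative extension, let the matrix drive the solver:
+     var x2 = A.SolveIterative(b, tfqmr,
+         new IterationCountStopCriterion<double>(1000),
+         new ResidualStopCriterion<double>(1e-10));
+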
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. 
+ + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
+ + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. 
+ A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. 
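As a rough orientation for the dense and sparse factory methods listed above, the sketch below uses the MathNet.Numerics 3.x `Matrix<double>.Build` entry point; the sizes and values are placeholders, not anything prescribed by this diff:

```csharp
using MathNet.Numerics.LinearAlgebra;

var zeros  = Matrix<double>.Build.Dense(3, 3);                  // all cells initialized to zero
var filled = Matrix<double>.Build.Dense(3, 3, 1.5);             // every cell set to the same value
var init   = Matrix<double>.Build.Dense(3, 3, (i, j) => i + j); // per-cell init function
var copy   = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });

// Sparse variants allocate no storage for zero entries.
var sparse         = Matrix<double>.Build.Sparse(1000, 1000);
var sparseIdentity = Matrix<double>.Build.SparseIdentity(1000);
```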
+ + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. 
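The diagonal builders above store only the diagonal itself. A short, hedged example of the identity and diagonal factories under the same 3.x API assumption (values are arbitrary):

```csharp
using MathNet.Numerics.LinearAlgebra;

var identity = Matrix<double>.Build.DiagonalIdentity(4);        // 4x4 identity, diagonal storage
var scaled   = Matrix<double>.Build.DenseDiagonal(4, 2.0);      // dense storage, 2.0 on the diagonal
var diagonal = Matrix<double>.Build.DiagonalOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });
```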
+ + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. 
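The vector builders mirror the matrix ones. A minimal sketch against the 3.x API; the sizes and values below are for illustration only:

```csharp
using MathNet.Numerics.LinearAlgebra;

var zeros  = Vector<double>.Build.Dense(5);                    // five zeros
var filled = Vector<double>.Build.Dense(5, 1.0);               // five ones
var init   = Vector<double>.Build.Dense(5, i => i * i);        // 0, 1, 4, 9, 16
var copied = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
var sparse = Vector<double>.Build.Sparse(10000);               // no storage for zero entries
```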
+ + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. 
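The "2D array of existing matrices" entry just above assembles a larger matrix from sub-blocks, padding any misaligned block with zeros. A small sketch of what MathNet.Numerics 3.x exposes as `DenseOfMatrixArray`:

```csharp
using MathNet.Numerics.LinearAlgebra;

var I = Matrix<double>.Build.DenseIdentity(2);
var Z = Matrix<double>.Build.Dense(2, 2);        // 2x2 zero block

// Builds the 4x4 block matrix [ I  Z ; Z  I ] from a 2D array of existing matrices.
var block = Matrix<double>.Build.DenseOfMatrixArray(new[,] { { I, Z }, { Z, I } });
```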
+ + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. 
+ This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
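Several entries above distinguish copying constructors from ones that bind directly to a caller-supplied array. A hedged sketch of the direct-binding behaviour (MathNet.Numerics 3.x, column-major layout as documented above):

```csharp
using MathNet.Numerics.LinearAlgebra;

// Column-major storage bound without copying: changes flow both ways.
var data = new double[] { 1, 2, 3, 4, 5, 6 };   // 3 rows x 2 columns, column by column
var m = Matrix<double>.Build.Dense(3, 2, data);

data[0] = 42.0;                                 // m[0, 0] now reads 42 as well
m[2, 1] = -1.0;                                 // and data[5] now reads -1

// Square diagonal matrix bound directly to the diagonal array.
var d = Matrix<double>.Build.Diagonal(new double[] { 1, 2, 3 });
```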
+ + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. 
+ Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + Supported data types are double, single, , and . + + + + Gets the lower triangular form of the Cholesky matrix. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + Supported data types are double, single, , and . 
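For the Cholesky factorization documented above (A = L*L' for a symmetric positive definite A, computed at construction time), here is a minimal usage sketch against the 3.x API; the 2x2 matrix is an arbitrary SPD example:

```csharp
using MathNet.Numerics.LinearAlgebra;

var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
var chol = A.Cholesky();               // throws if A is not symmetric positive definite

Matrix<double> L = chol.Factor;        // lower triangular, A = L * L'
double det = chol.Determinant;         // determinant of A
double logDet = chol.DeterminantLn;    // log determinant, safer for large matrices

var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });
var x = chol.Solve(b);                 // solves A * x = b
```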
+ + + + Gets or sets a value indicating whether matrix is symmetric or not + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Gets or sets the eigen values (λ) of matrix in ascending value. + + + + + Gets or sets eigenvectors. + + + + + Gets or sets the block diagonal eigenvalue matrix. + + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + Supported data types are double, single, , and . + + + + Classes that solves a system of linear equations, AX = B. + + Supported data types are double, single, , and . + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, Ax = b + + The right hand side vector, b. + The left hand side Vector, x. + + + + Solves a system of linear equations, Ax = b. + + The right hand side vector, b. + The left hand side Matrix>, x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + Supported data types are double, single, , and . + + + + Gets the lower triangular factor. + + + + + Gets the upper triangular factor. + + + + + Gets the permutation applied to LU factorization. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. 
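The LU and eigenvalue factorizations above follow the same pattern: factor once, then reuse the factorization for solves. A rough sketch (3.x API; the matrix values are placeholders):

```csharp
using MathNet.Numerics.LinearAlgebra;

var A = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 2, 1, 1 },
    { 1, 3, 2 },
    { 1, 0, 0 }
});

var lu = A.LU();                          // P * A = L * U with partial pivoting
var b = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });
var x = lu.Solve(b);                      // solves A * x = b
var inverse = A.Inverse();                // computed via the LU decomposition

var evd = A.Evd();                        // A * V = V * D
var eigenValues = evd.EigenValues;        // complex in general
var eigenVectors = evd.EigenVectors;
```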
+ + + + The type of QR factorization go perform. + + + + + Compute the full QR factorization of a matrix. + + + + + Compute the thin QR factorization of a matrix. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + Supported data types are double, single, , and . + + + + Gets or sets orthogonal Q matrix + + + + + Gets the upper triangular factor R. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + Supported data types are double, single, , and . + + + Indicating whether U and VT matrices have been computed during SVD factorization. + + + + Gets the singular values (Σ) of matrix in ascending value. + + + + + Gets the left singular vectors (U - m-by-m unitary matrix) + + + + + Gets the transpose right singular vectors (transpose of V, an n-by-n unitary matrix) + + + + + Returns the singular values as a diagonal . + + The singular values as a diagonal . + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. 
+ The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + Supported data types are double, single, , and . + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + + + The value of 1.0. + + + + + The value of 0.0. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar denominator to use. + The matrix to store the result of the division. 
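For the QR and singular value decompositions documented above, a short hedged example; the full/thin distinction follows the QRMethod description, and the 5x3 random matrix is arbitrary (almost surely full rank):

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Factorization;

var A = Matrix<double>.Build.Random(5, 3);   // standard-distribution samples

var qr = A.QR(QRMethod.Thin);                // A = Q * R, Q is 5x3, R is 3x3
var b = Vector<double>.Build.Dense(5, 1.0);
var x = qr.Solve(b);                         // least-squares solution of A * x = b

var svd = A.Svd();                           // A = U * S * V^T
Vector<double> sigma = svd.S;                // singular values
double cond = svd.ConditionNumber;           // max(S) / min(S)
int rank = svd.Rank;                         // effective numerical rank
```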
+ + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar numerator to use. + The matrix to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent matrix and store the result into the result matrix. + + The exponent matrix to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Adds a scalar to each element of the matrix. + + The scalar to add. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds a scalar to each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix. + + The scalar to subtract. 
+ A new matrix containing the subtraction of this matrix and the scalar. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts each element of the matrix from a scalar. + + The scalar to subtract from. + A new matrix containing the subtraction of the scalar and this matrix. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of this matrix with a scalar. + + The scalar to multiply with. + The result of the multiplication. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides each element of this matrix with a scalar. + + The scalar to divide with. + The result of the division. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides a scalar by each element of the matrix. + + The scalar to divide. + The result of the division. + + + + Divides a scalar by each element of the matrix and places results into the result matrix. + + The scalar to divide. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.ColumnCount != rightSide.Count. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.RowCount. + If this.ColumnCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ). + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.Rows. + If the result matrix's dimensions are not the this.Rows x other.Columns. + + + + Multiplies this matrix with another matrix and returns the result. 
+ + The matrix to multiply with. + If this.Columns != other.Rows. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with the conjugate transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the conjugate transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the conjugate transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Raises this square matrix to a positive integer exponent and places the results into the result matrix. + + The positive integer exponent to raise the matrix to. + The result of the power. + + + + Multiplies this square matrix with another matrix and returns the result. + + The positive integer exponent to raise the matrix to. + + + + Negate each element of this matrix. + + A matrix containing the negated values. + + + + Negate each element of this matrix and place the results into the result matrix. 
+ + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. 
+ + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
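The pointwise members documented above operate element by element rather than as matrix algebra. A short sketch, assuming the MathNet.Numerics 3.x names PointwiseMultiply, PointwiseDivide, PointwisePower and PointwiseExp, which are not named explicitly in the stripped text:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
    var n = Matrix<double>.Build.DenseOfArray(new double[,] { { 10, 20 }, { 30, 40 } });

    var product  = m.PointwiseMultiply(n);  // element-by-element product
    var quotient = n.PointwiseDivide(m);    // element-by-element division
    var squared  = m.PointwisePower(2.0);   // raise each element to an exponent
    var exps     = m.PointwiseExp();        // exp applied to each element

    // Overloads that take a result matrix write into an existing matrix of the same size.
    var result = Matrix<double>.Build.Dense(2, 2);
    m.PointwiseMultiply(n, result);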
+ + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Calculates the rank of the matrix. + + effective numerical rank, obtained from SVD + + + + Calculates the nullity of the matrix. + + effective numerical nullity, obtained from SVD + + + Calculates the condition number of this matrix. + The condition number of the matrix. + The condition number is calculated using singular value decomposition. + + + Computes the determinant of this matrix. + The determinant of this matrix. + + + + Computes an orthonormal basis for the null space of this matrix, + also known as the kernel of the corresponding matrix transformation. 
+ + + + + Computes an orthonormal basis for the column space of this matrix, + also known as the range or image of the corresponding matrix transformation. + + + + Computes the inverse of this matrix. + The inverse of this matrix. + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + If the result matrix's dimensions are not (this.Rows * lower.rows) x (this.Columns * lower.Columns). + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + Calculates the induced L1 norm of this matrix. 
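Trace, rank, determinant, inverse, pseudo-inverse and the Kronecker product documented above can be exercised in a few lines; the member names used here (Trace, Rank, Determinant, Inverse, PseudoInverse, KroneckerProduct) are assumed from MathNet.Numerics 3.x:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 7 }, { 2, 6 } });

    double trace = m.Trace();         // 4 + 6 = 10
    int rank     = m.Rank();          // effective numerical rank, obtained from the SVD
    double det   = m.Determinant();   // 4*6 - 7*2 = 10
    var inverse  = m.Inverse();       // inverse of the square matrix
    var pinv     = m.PseudoInverse(); // Moore-Penrose pseudo-inverse

    // Kronecker product of a 2x2 with a 2x2 identity gives a 4x4 block matrix.
    var eye  = Matrix<double>.Build.DenseIdentity(2);
    var kron = m.KroneckerProduct(eye);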
+ The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + For sparse matrices, the L2 norm is computed using a dense implementation of singular value decomposition. + In a later release, it will be replaced with a sparse implementation. + + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns a string that describes the type, dimensions and shape of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes this matrix. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Matrix class. + + + + + Gets the raw matrix data storage. + + + + + Gets the number of columns. + + The number of columns. + + + + Gets the number of rows. + + The number of rows. + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + + + + Sets the value of the given element without range checking. + + + The row of the element. + + + The column of the element. 
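For the induced and entry-wise norms, row/column norms and sums documented earlier in this hunk, a small sketch (the names L1Norm, L2Norm, InfinityNorm, FrobeniusNorm, RowNorms, ColumnAbsoluteSums and NormalizeColumns are assumed from MathNet.Numerics 3.x):

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, -2 }, { -3, 4 } });

    double l1   = m.L1Norm();        // maximum absolute column sum: max(4, 6) = 6
    double linf = m.InfinityNorm();  // maximum absolute row sum:    max(3, 7) = 7
    double l2   = m.L2Norm();        // largest singular value
    double fro  = m.FrobeniusNorm(); // sqrt(1 + 4 + 9 + 16)

    Vector<double> rowNorms = m.RowNorms(2.0);        // Euclidean norm of each row
    Vector<double> colSums  = m.ColumnAbsoluteSums(); // absolute value sum per column
    var unitCols = m.NormalizeColumns(2.0);           // columns scaled to unit L2 norm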
+ + + The value to set the element to. + + + + + Sets all values to zero. + + + + + Sets all values of a row to zero. + + + + + Sets all values of a column to zero. + + + + + Sets all values for all of the chosen rows to zero. + + + + + Sets all values for all of the chosen columns to zero. + + + + + Sets all values of a sub-matrix to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Creates a clone of this instance. + + + A clone of the instance. + + + + + Copies the elements of this matrix to the given matrix. + + + The matrix to copy values into. + + + If target is . + + + If this and the target matrix do not have the same dimensions.. + + + + + Copies a row into an Vector. + + The row to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of rows. + + + + Copies a row into to the given Vector. + + The row to copy. + The Vector to copy the row into. + If the result vector is . + If is negative, + or greater than or equal to the number of rows. + If this.Columns != result.Count. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of rows. + is negative, + or greater than or equal to the number of columns. + (columnIndex + length) >= Columns. + If is not positive. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Copies a column into a new Vector>. + + The column to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of columns. + + + + Copies a column into to the given Vector. + + The column to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If this.Rows != result.Count. + + + + Copies the requested column elements into a new Vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of columns. + is negative, + or greater than or equal to the number of rows. + (rowIndex + length) >= Rows. + + If is not positive. + + + + Copies the requested column elements into the given vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. 
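The row/column copy and clearing members above behave as sketched below; Row, Column, Clone, ClearRows and CoerceZero are the MathNet.Numerics 3.x names assumed here:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1, 2, 3 },
        { 4, 5, 6 },
        { 7, 8, 9 }
    });

    Vector<double> row1  = m.Row(1);       // [4, 5, 6]
    Vector<double> col2  = m.Column(2);    // [3, 6, 9]
    Vector<double> slice = m.Row(0, 1, 2); // row 0, starting at column 1, two elements: [2, 3]

    var copy = m.Clone();
    copy.ClearRows(0);      // set every value of row 0 to zero
    copy.CoerceZero(1e-12); // flush values below the threshold to exact zero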
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Returns the elements of the diagonal in a Vector. + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a new matrix and inserts the given column at the given index. + + The index of where to insert the column. + The column to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of columns. + If the size of != the number of rows. + + + + Creates a new matrix with the given column removed. + + The index of the column to remove. + A new matrix without the chosen column. + If is < zero or >= the number of columns. + + + + Copies the values of the given Vector to the specified column. + + The column to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given Vector to the specified sub-column. + + The column to copy the values to. + The row to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given array to the specified column. + + The column to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + If the size of does not + equal the number of rows of this Matrix. 
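Sub-matrix extraction and column editing, as documented above, might look like the following; SubMatrix, Diagonal, StrictlyUpperTriangle, InsertColumn, RemoveColumn and SetColumn are assumed MathNet.Numerics 3.x member names:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1, 2, 3 },
        { 4, 5, 6 },
        { 7, 8, 9 }
    });

    var block = m.SubMatrix(0, 2, 1, 2);   // rows 0-1, columns 1-2 -> 2x2 block
    var diag  = m.Diagonal();              // [1, 5, 9]
    var upper = m.StrictlyUpperTriangle(); // upper triangle without the diagonal

    var widened  = m.InsertColumn(1, Vector<double>.Build.Dense(3, 0.5)); // returns a new matrix
    var narrowed = m.RemoveColumn(0);                                     // returns a new matrix
    m.SetColumn(2, new double[] { 30, 60, 90 });                          // writes in place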
+ + + + Creates a new matrix and inserts the given row at the given index. + + The index of where to insert the row. + The row to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of rows. + If the size of != the number of columns. + + + + Creates a new matrix with the given row removed. + + The index of the row to remove. + A new matrix without the chosen row. + If is < zero or >= the number of rows. + + + + Copies the values of the given Vector to the specified row. + + The row to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given Vector to the specified sub-row. + + The row to copy the values to. + The column to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given array to the specified row. + + The row to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The column to start copying to. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The number of rows to copy. Must be positive. + The column to start copying to. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The row of the sub-matrix to start copying from. + The number of rows to copy. Must be positive. + The column to start copying to. + The column of the sub-matrix to start copying from. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of the given Vector to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). 
+ For non-square matrices, the elements of are copied to + this[i,i]. + + + + Returns the transpose of this matrix. + + The transpose of this matrix. + + + + Puts the transpose of this matrix into the result matrix. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + + + + Concatenates this matrix with the given matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Concatenates this matrix with the given matrix and places the result into the result matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Diagonally stacks his matrix on top of the given matrix. The new matrix is a M-by-N matrix, + where M = this.Rows + lower.Rows and N = this.Columns + lower.Columns. + The values of off the off diagonal matrices/blocks are set to zero. + + The lower, right matrix. + If lower is . + the combined matrix + + + + + + Diagonally stacks his matrix on top of the given matrix and places the combined matrix into the result matrix. + + The lower, right matrix. + The combined matrix + If lower is . + If the result matrix is . + If the result matrix's dimensions are not (this.Rows + lower.rows) x (this.Columns + lower.Columns). + + + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Evaluates whether this matrix is conjugate symmetric. + + + + + Returns this matrix as a multidimensional array. + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + + A multidimensional containing the values of this matrix. + + + + Returns the matrix's elements as an array with the data laid out column by column (column major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the matrix's elements as an array with the data laid out row by row (row major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
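The column-major and row-major layouts described by the two examples above can be checked directly; ToColumnMajorArray, ToRowMajorArray, ToArray and ToRowArrays are the assumed MathNet.Numerics 3.x names:

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1, 2, 3 },
        { 4, 5, 6 }
    });

    double[] colMajor = m.ToColumnMajorArray(); // 1, 4, 2, 5, 3, 6
    double[] rowMajor = m.ToRowMajorArray();    // 1, 2, 3, 4, 5, 6
    double[,] grid    = m.ToArray();            // independent 2 x 3 copy
    double[][] rows   = m.ToRowArrays();        // jagged array, one inner array per row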
+ + + Returns this matrix as array of row arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns this matrix as array of column arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns the internal multidimensional array of this matrix if, and only if, this matrix is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the matrix will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Returns the internal column by column (column major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row by row (row major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowArrays instead if you always need an independent array. + + + + + Returns the internal column arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnArrays instead if you always need an independent array. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix. + + The column to start enumerating over. + The number of columns to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix and their index. + + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix and their index. + + The column to start enumerating over. + The number of columns to enumerating over. + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix. + + The row to start enumerating over. + The number of rows to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix and their index. 
+ + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix and their index. + + The row to start enumerating over. + The number of rows to enumerating over. + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Applies a function to each value of this matrix and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value with its result. + The row and column indices of each value (zero-based) are passed as first arguments to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + For each row, applies a function f to each element of the row, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each row. + + + + + For each column, applies a function f to each element of the column, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each column. + + + + + Applies a function f to each row vector, threading an accumulator vector argument through the computation. 
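A brief sketch of the map and indexed-enumeration members described in this region, assuming the MathNet.Numerics 3.x names Map, MapIndexed, MapInplace and EnumerateIndexed (which in the 3.x series yields row index, column index and value as a Tuple):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.Dense(3, 3, (i, j) => i * 3.0 + j);

    var doubled  = m.Map(x => 2.0 * x);                         // returns a new matrix
    var withDiag = m.MapIndexed((i, j, x) => i == j ? 1.0 : x); // indices passed first
    m.MapInplace(x => Math.Max(x, 0.0));                        // overwrites this matrix

    foreach (var cell in m.EnumerateIndexed())
    {
        Console.WriteLine("({0},{1}) = {2}", cell.Item1, cell.Item2, cell.Item3);
    }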
+ Returns the resulting accumulator vector. + + + + + Applies a function f to each column vector, threading an accumulator vector argument through the computation. + Returns the resulting accumulator vector. + + + + + Reduces all row vectors by applying a function between two of them, until only a single vector is left. + + + + + Reduces all column vectors by applying a function between two of them, until only a single vector is left. + + + + + Applies a function to each value pair of two matrices and replaces the value in the result vector. + + + + + Applies a function to each value pair of two matrices and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two matrices and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two matrices of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element pairs of two matrices of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two matrices of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to add. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to add. + The right matrix to add. + The result of the addition. + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Subtracts a scalar from each element of a matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. 
+ The left matrix to subtract. + The scalar value to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Substracts each element of a matrix from a scalar. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Divides a scalar with a matrix. + + The scalar to divide. + The matrix. + The result of the division. + If is . + + + + Divides a matrix with a scalar. + + The matrix to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of the matrix of the given divisor. + + The matrix whose elements we want to compute the modulus of. + The divisor to use. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the matrix. + + The dividend we want to compute the modulus of. + The matrix whose elements we want to use as divisor. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two matrices. + + The matrix whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
+ + + + Computes the sqrt of a matrix pointwise + + The input matrix + + + + + Computes the exponential of a matrix pointwise + + The input matrix + + + + + Computes the log of a matrix pointwise + + The input matrix + + + + + Computes the log10 of a matrix pointwise + + The input matrix + + + + + Computes the sin of a matrix pointwise + + The input matrix + + + + + Computes the cos of a matrix pointwise + + The input matrix + + + + + Computes the tan of a matrix pointwise + + The input matrix + + + + + Computes the asin of a matrix pointwise + + The input matrix + + + + + Computes the acos of a matrix pointwise + + The input matrix + + + + + Computes the atan of a matrix pointwise + + The input matrix + + + + + Computes the sinh of a matrix pointwise + + The input matrix + + + + + Computes the cosh of a matrix pointwise + + The input matrix + + + + + Computes the tanh of a matrix pointwise + + The input matrix + + + + + Computes the absolute value of a matrix pointwise + + The input matrix + + + + + Computes the floor of a matrix pointwise + + The input matrix + + + + + Computes the ceiling of a matrix pointwise + + The input matrix + + + + + Computes the rounded value of a matrix pointwise + + The input matrix + + + + + Computes the Cholesky decomposition for a matrix. + + The Cholesky decomposition object. + + + + Computes the LU decomposition for a matrix. + + The LU decomposition object. + + + + Computes the QR decomposition for a matrix. + + The type of QR factorization to perform. + The QR decomposition object. + + + + Computes the QR decomposition for a matrix using Modified Gram-Schmidt Orthogonalization. + + The QR decomposition object. + + + + Computes the SVD decomposition for a matrix. + + Compute the singular U and VT vectors or not. + The SVD decomposition object. + + + + Computes the EVD decomposition for a matrix. + + The EVD decomposition object. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. 
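The factorization and solve members documented in this region support both one-shot and reusable solves; Solve, LU, Svd and ConditionNumber are assumed MathNet.Numerics 3.x names:

    using MathNet.Numerics.LinearAlgebra;

    var a = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 4, 1 },
        { 1, 3 }
    });
    var b = Vector<double>.Build.Dense(new double[] { 1, 2 });

    // One-shot solve of Ax = b; a suitable factorization is chosen internally.
    Vector<double> x = a.Solve(b);

    // Factor once, then reuse the decomposition for several right-hand sides.
    var lu = a.LU();
    Vector<double> x1 = lu.Solve(b);

    // SVD with singular vectors; also exposes the condition number mentioned above.
    var svd = a.Svd(true);
    double cond = svd.ConditionNumber;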
+ + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The result matrix X. + + + + Converts a matrix to single precision. + + + + + Converts a matrix to double precision. + + + + + Converts a matrix to single precision complex numbers. + + + + + Converts a matrix to double precision complex numbers. + + + + + Gets a single precision complex matrix with the real parts from the given matrix. + + + + + Gets a double precision complex matrix with the real parts from the given matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. 
+ + + + + Existing data may not be all zeros, so clearing may be necessary + if not all of it will be overwritten anyway. + + + + + If existing data is assumed to be all zeros already, + clearing it may be skipped if applicable. + + + + + Allow skipping zero entries (without enforcing skipping them). + When enumerating sparse matrices this can significantly speed up operations. + + + + + Force applying the operation to all fields even if they are zero. + + + + + It is not known yet whether a matrix is symmetric or not. + + + + + A matrix is symmetric + + + + + A matrix is hermitian (conjugate symmetric). + + + + + A matrix is not symmetric + + + + + Defines an that uses a cancellation token as stop criterion. + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Stop criterion that delegates the status determination to a delegate. + + + + + Create a new instance of this criterion with a custom implementation. + + Custom implementation with the same signature and semantics as the DetermineStatus method. + + + + Determines the status of the iterative calculation by delegating it to the provided delegate. + Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + + + + Clones this criterion and its settings. + + + + + Monitors an iterative calculation for signs of divergence. + + + + + The maximum relative increase the residual may experience without triggering a divergence warning. + + + + + The number of iterations over which a residual increase should be tracked before issuing a divergence warning. + + + + + The status of the calculation + + + + + The array that holds the tracking information. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified maximum + relative increase and the specified minimum number of tracking iterations. + + The maximum relative increase that the residual may experience before a divergence warning is issued. + The minimum number of iterations over which the residual must grow before a divergence warning is issued. + + + + Gets or sets the maximum relative increase that the residual may experience before a divergence warning is issued. + + Thrown if the Maximum is set to zero or below. 
+ + + + Gets or sets the minimum number of iterations over which the residual must grow before + issuing a divergence warning. + + Thrown if the value is set to less than one. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Detect if solution is diverging + + true if diverging, otherwise false + + + + Gets required history Length + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Defines an that monitors residuals for NaN's. + + + + + The status of the calculation + + + + + The iteration number of the last iteration. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + The base interface for classes that provide stop criteria for iterative calculations. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current IIterationStopCriterion. Status is set to Status field of current object. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + is not a legal value. Status should be set in implementation. + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + To implementers: Invoking this method should not clear the user defined + property values, only the state that is used to track the progress of the + calculation. + + + + Defines the interface for classes that solve the matrix equation Ax = b in + an iterative manner. + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Defines the interface for objects that can create an iterative solver with + specific settings. 
This interface is used to pass iterative solver creation + setup information around. + + + + + Gets the type of the solver that will be created by this setup object. + + + + + Gets type of preconditioner, if any, that will be created by this setup object. + + + + + Creates the iterative solver to be used. + + + + + Creates the preconditioner to be used by default (can be overwritten). + + + + + Gets the relative speed of the solver. + + Returns a value between 0 and 1, inclusive. + + + + Gets the relative reliability of the solver. + + Returns a value between 0 and 1 inclusive. + + + + The base interface for preconditioner classes. + + + + Preconditioners are used by iterative solvers to improve the convergence + speed of the solving process. Increase in convergence speed + is related to the number of iterations necessary to get a converged solution. + So while in general the use of a preconditioner means that the iterative + solver will perform fewer iterations it does not guarantee that the actual + solution time decreases given that some preconditioners can be expensive to + setup and run. + + + Note that in general changes to the matrix will invalidate the preconditioner + if the changes occur after creating the preconditioner. + + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix on which the preconditioner is based. + + + + Approximates the solution to the matrix equation Mx = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Defines an that monitors the numbers of iteration + steps as stop criterion. + + + + + The default value for the maximum number of iterations the process is allowed + to perform. + + + + + The maximum number of iterations the calculation is allowed to perform. + + + + + The status of the calculation + + + + + Initializes a new instance of the class with the default maximum + number of iterations. + + + + + Initializes a new instance of the class with the specified maximum + number of iterations. + + The maximum number of iterations the calculation is allowed to perform. + + + + Gets or sets the maximum number of iterations the calculation is allowed to perform. + + Thrown if the Maximum is set to a negative value. + + + + Returns the maximum number of iterations to the default. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Iterative Calculation Status + + + + + An iterator that is used to check if an iterative calculation should continue or stop. + + + + + The collection that holds all the stop criteria and the flag indicating if they should be added + to the child iterators. + + + + + The status of the iterator. + + + + + Initializes a new instance of the class with the default stop criteria. 
+ + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Gets the current calculation status. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual iterators may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Indicates to the iterator that the iterative process has been cancelled. + + + Does not reset the stop-criteria. + + + + + Resets the to the pre-calculation state. + + + + + Creates a deep clone of the current iterator. + + The deep clone of the current iterator. + + + + Defines an that monitors residuals as stop criterion. + + + + + The maximum value for the residual below which the calculation is considered converged. + + + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + The status of the calculation + + + + + The number of iterations since the residuals got below the maximum. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified + maximum residual and minimum number of iterations. + + + The maximum value for the residual below which the calculation is considered converged. + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + Gets or sets the maximum value for the residual below which the calculation is considered + converged. + + Thrown if the Maximum is set to a negative value. + + + + Gets or sets the minimum number of iterations for which the residual has to be + below the maximum before the calculation is considered converged. + + Thrown if the BelowMaximumFor is set to a value less than 1. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Loads the available objects from the specified assembly. + + The assembly which will be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. 
+ The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The type in the assembly which should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The of the assembly that should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + + + + A unit preconditioner. This preconditioner does not actually do anything + it is only used when running an without + a preconditioner. + + + + + The coefficient matrix on which this preconditioner operates. + Is used to check dimensions on the different vectors that are processed. + + + + + Initializes the preconditioner and loads the internal data structures. + + + The matrix upon which the preconditioner is based. + + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + If and do not have the same size. + + + - or - + + + If the size of is different the number of rows of the coefficient matrix. + + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Evaluate the row and column at a specific data index. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. 
+ + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + + The array containing the row indices of the existing rows. Element "i" of the array gives the index of the + element in the array that is first non-zero element in a row "i". + The last value is equal to ValueCount, so that the number of non-zero entries in row "i" is always + given by RowPointers[i+i] - RowPointers[i]. This array thus has length RowCount+1. + + + + + An array containing the column indices of the non-zero values. Element "j" of the array + is the number of the column in matrix that contains the j-th value in the array. + + + + + Array that contains the non-zero elements of matrix. Values of the non-zero elements of matrix are mapped into the values + array using the row-major storage mapping described in a compressed sparse row (CSR) format. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Delete value from internal storage + + Index of value in nonZeroValues array + Row number of matrix + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Find item Index in nonZeroValues array + + Matrix row index + Matrix column index + Item index + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
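The storage entries above describe the compressed sparse row (CSR) layout used by the sparse matrix type: RowPointers, ColumnIndices and Values, with the non-zeros of row i occupying the index range from RowPointers[i] up to (but not including) RowPointers[i+1]. A brief sketch, assuming the public SparseCompressedRowMatrixStorage<T> type exposed by MathNet.Numerics 3.x:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Storage;

    var m = Matrix<double>.Build.SparseOfArray(new double[,]
    {
        { 1, 0, 2 },
        { 0, 0, 3 },
        { 4, 5, 0 },
    });

    // The underlying storage exposes the three CSR arrays described above.
    var csr = (SparseCompressedRowMatrixStorage<double>)m.Storage;
    Console.WriteLine($"non-zero count: {csr.ValueCount}");

    for (int i = 0; i < m.RowCount; i++)
    {
        // Row i holds RowPointers[i + 1] - RowPointers[i] non-zero entries.
        for (int k = csr.RowPointers[i]; k < csr.RowPointers[i + 1]; k++)
        {
            Console.WriteLine($"({i},{csr.ColumnIndices[k]}) = {csr.Values[k]}");
        }
    }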
+ + + + + Array that contains the indices of the non-zero values. + + + + + Array that contains the non-zero elements of the vector. + + + + + Gets the number of non-zero elements in the vector. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the vector storage format is dense. + + + + + Gets or sets the value at the given index, with range checking. + + + The index of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + The index of the element. + The requested element. + Not range-checked. + + + + Sets the element without range checking. + + The index of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + + Defines the generic class for Vector classes. + + Supported data types are double, single, , and . + + + + The zero value for type T. + + + + + The value of 1.0 for type T. + + + + + Negates vector and save result to + + Target vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. 
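The generic Vector<T> operations documented above (Add, Subtract, Multiply, DotProduct, ConjugateDotProduct) are exposed as instance methods; a short sketch of the method forms using double-precision vectors:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

    var sum = u.Add(v);              // [5, 7, 9]
    var scaled = u.Multiply(2.0);    // [2, 4, 6]
    var shifted = u.Subtract(1.0);   // [0, 1, 2]
    double dot = u.DotProduct(v);    // 1*4 + 2*5 + 3*6 = 32

    Console.WriteLine($"{sum} {scaled} {shifted} {dot}");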
+ + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar denominator to use. + The vector to store the result of the division. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar numerator to use. + The vector to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Adds a scalar to each element of the vector. + + The scalar to add. + A copy of the vector with the scalar added. + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + If this vector and are not the same size. + + + + Adds another vector to this vector. + + The vector to add to this one. + A new vector containing the sum of both vectors. + If this vector and are not the same size. 
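Most of the operations above come in pairs: one overload returns a new vector, the other writes into a caller-supplied result vector of the same size (hence the size-mismatch exceptions noted throughout). The result-vector overloads avoid allocations in tight loops; a minimal sketch:

    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });
    var result = Vector<double>.Build.Dense(u.Count);   // reusable buffer, same size as the operands

    u.Add(v, result);                 // result = u + v
    u.PointwiseMultiply(v, result);   // result = u .* v, overwriting the previous contents
    u.Negate(result);                 // result = -u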
+ + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Subtracts a scalar from each element of the vector. + + The scalar to subtract. + A new vector containing the subtraction of this vector and the scalar. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Subtracts each element of the vector from a scalar. + + The scalar to subtract from. + A new vector containing the subtraction of the scalar and this vector. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Returns a negated vector. + + The negated vector. + Added as an alternative to the unary negation operator. + + + + Negates vector and save result to + + Target vector + + + + Subtracts another vector from this vector. + + The vector to subtract from this one. + A new vector containing the subtraction of the the two vectors. + If this vector and are not the same size. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Return vector with complex conjugate values of the source vector + + Conjugated vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector. + + The scalar to multiply. + A new vector that is the multiplication of the vector and the scalar. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + If this vector and are not the same size. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + If is not of the same size. + + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + If is not of the same size. + If is . + + + + + Divides each element of the vector by a scalar. + + The scalar to divide with. + A new vector that is the division of the vector and the scalar. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar to divide with. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Divides a scalar by each element of the vector. + + The scalar to divide. + A new vector that is the division of the vector and the scalar. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. 
+ A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector containing the result. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector. + + The vector to pointwise multiply with this one. + A new vector which is the pointwise multiplication of the two vectors. + If this vector and are not the same size. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector. + + The pointwise denominator vector to use. + A new vector which is the pointwise division of the two vectors. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise division. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The matrix to store the result into. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + The vector to store the result into. + If this vector and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. 
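The pointwise family documented above works element by element rather than in the linear-algebra sense. A short sketch using the overloads that return a new vector; PointwisePower, PointwiseExp and PointwiseLog are assumed to be the public member names for the operations described above in the referenced 3.x package:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.Dense(new[] { 2.0, 4.0, 6.0 });

    var prod = u.PointwiseMultiply(v);   // [2, 8, 18]
    var quot = v.PointwiseDivide(u);     // [2, 2, 2]
    var squares = u.PointwisePower(2.0); // [1, 4, 9]
    var expo = u.PointwiseExp();         // [e^1, e^2, e^3]

    Console.WriteLine($"{prod} {quot} {squares} {expo}");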
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise modulus. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise remainder. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Helper function to apply a unary function to a vector. The function + f modifies the vector given to it in place. Before its + called, a copy of the 'this' vector with the same dimension is + first created, then passed to f. The copy is returned as the result + + Function which takes a vector, modifies it in place and returns void + New instance of vector which is the result + + + + Helper function to apply a unary function which modifies a vector + in place. + + Function which takes a vector, modifies it in place and returns void + The vector where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes a scalar and + a vector and modifies the latter in place. A copy of the "this" + vector is therefore first made and then passed to f together with + the scalar argument. The copy is then returned as the result + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + The resulting vector + + + + Helper function to apply a binary function which takes a scalar and + a vector, modifies the latter in place and returns void. + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the latter in place. A copy of the "this" vector is + first made and then passed to f together with the other vector. The + copy is then returned as the result + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the second one in place + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The vector to store the result. 
+ If this vector and are not the same size. + + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector. + + The other vector + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. 
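The entries above cover elementwise transcendental functions (abs, sin, sqrt and so on) and pointwise minimum/maximum against a scalar. Where a dedicated pointwise member is not available in the referenced package version, Map gives the same effect; the sketch below assumes PointwiseMaximum and PointwiseMinimum accept a scalar argument, as documented above:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var v = Vector<double>.Build.Dense(new[] { -2.0, -0.5, 0.25, 3.0 });

    var clampedLow = v.PointwiseMaximum(0.0);   // negative entries replaced by 0
    var clampedHigh = v.PointwiseMinimum(1.0);  // entries above 1 replaced by 1
    var sines = v.Map(Math.Sin);                // general elementwise transform
    var magnitudes = v.Map(Math.Abs);           // same effect as the documented abs member

    Console.WriteLine($"{clampedLow} {clampedHigh} {sines} {magnitudes}");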
+ + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = (sum(abs(this[i])^p))^(1/p) + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + The p value. + This vector normalized to a unit vector with respect to the p-norm. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the value of maximum element. + + The value of maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the value of the minimum element. + + The value of the minimum element. + + + + Returns the index of the minimum element. + + The index of minimum element. 
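The norm and extremum members documented above reduce a vector to a scalar or an index. A minimal sketch:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var v = Vector<double>.Build.Dense(new[] { 3.0, -4.0, 1.0 });

    Console.WriteLine(v.L1Norm());          // 8, the sum of absolute values (Manhattan norm)
    Console.WriteLine(v.L2Norm());          // sqrt(26), approximately 5.099 (Euclidean norm)
    Console.WriteLine(v.InfinityNorm());    // 4, the maximum absolute value
    Console.WriteLine(v.Norm(3));           // general p-norm
    Console.WriteLine(v.Maximum());         // 3
    Console.WriteLine(v.MinimumIndex());    // 1, the index of -4
    Console.WriteLine(v.AbsoluteMaximum()); // 4

    var unit = v.Normalize(2);              // v scaled to unit Euclidean length
    Console.WriteLine(unit);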
+ + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Computes the sum of the absolute value of the vector's elements. + + The sum of the absolute value of the vector's elements. + + + + Indicates whether the current object is equal to another object of the same type. + + An object to compare with this object. + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns an enumerator that iterates through the collection. + + + A that can be used to iterate through the collection. + + + + + Returns an enumerator that iterates through a collection. + + + An object that can be used to iterate through the collection. + + + + + Returns a string that describes the type, dimensions and shape of this vector. + + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Character to use to print if there is not enough space to print all entries. Typical value: "..". + Character to use to separate two coluns on a line. Typical value: " " (2 spaces). + Character to use to separate two rows/lines. Typical value: Environment.NewLine. + Function to provide a string for any given entry value. + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that represents the content of this vector, column by column. + + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector, column by column and with a type header. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Vector class. + + + + + Gets the raw vector data storage. + + + + + Gets the length or number of dimensions of this vector. + + + + Gets or sets the value at the given . + The index of the value to get or set. + The value of the vector at the given . + If is negative or + greater than the size of the vector. + + + Gets the value at the given without range checking.. + The index of the value to get or set. + The value of the vector at the given . + + + Sets the at the given without range checking.. + The index of the value to get or set. + The value to set. 
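As the entries above note, the indexer is range-checked while At is not, and Sum/SumMagnitudes aggregate the elements. A short sketch:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var v = Vector<double>.Build.Dense(new[] { 1.0, -2.0, 3.0 });

    double a = v[1];          // range-checked indexer
    double b = v.At(1);       // unchecked accessor; the caller guarantees the index is valid
    v.At(1, 5.0);             // unchecked setter

    Console.WriteLine(v.Sum());            // 1 + 5 + 3 = 9
    Console.WriteLine(v.SumMagnitudes());  // |1| + |5| + |3| = 9
    Console.WriteLine(v.ToString());       // formatted output with a type header and the entries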
+ + + + Resets all values to zero. + + + + + Sets all values of a subvector to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Returns a deep-copy clone of the vector. + + A deep-copy clone of the vector. + + + + Set the values of this vector to the given values. + + The array containing the values to use. + If is . + If is not the same size as this vector. + + + + Copies the values of this vector into the target vector. + + The vector to copy elements into. + If is . + If is not the same size as this vector. + + + + Creates a vector containing specified elements. + + The first element to begin copying from. + The number of elements to copy. + A vector containing a copy of the specified elements. + If is not positive or + greater than or equal to the size of the vector. + If + is greater than or equal to the size of the vector. + + If is not positive. + + + + Copies the values of a given vector into a region in this vector. + + The field to start copying to + The number of fields to cpy. Must be positive. + The sub-vector to copy from. + If is + + + + Copies the requested elements from this vector to another. + + The vector to copy the elements to. + The element to start copying from. + The element to start copying to. + The number of elements to copy. + + + + Returns the data contained in the vector as an array. + The returned array will be independent from this vector. + A new memory block will be allocated for the array. + + The vector's data as an array. + + + + Returns the internal array of this vector if, and only if, this vector is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the vector will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Create a matrix based on this vector in column form (one single column). + + + This vector as a column matrix. + + + + + Create a matrix based on this vector in row form (one single row). + + + This vector as a row matrix. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. 
+ The enumerator will skip all elements with a zero value. + + + + + Applies a function to each value of this vector and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value with its result. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and returns the results as a new vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and returns the results as a new vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value pair of two vectors and replaces the value in the result vector. + + + + + Applies a function to each value pair of two vectors and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two vectors and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two vectors of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). 
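Map, Find and Exists are documented above together with the caveat that zero entries may be skipped on sparse storage. A small sketch, assuming the SparseOfIndexed builder and the Zeros enumeration available in the referenced 3.x package:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var v = Vector<double>.Build.SparseOfIndexed(5, new[]
    {
        Tuple.Create(1, 2.0),
        Tuple.Create(3, -7.0),
    });

    var doubled = v.Map(x => x * 2.0);             // zeros may be skipped on sparse storage
    var indexed = v.MapIndexed((i, x) => i + x);   // the element index is passed as the first argument

    bool anyNegative = v.Exists(x => x < 0.0);     // true
    var hit = v.Find(x => Math.Abs(x) > 5.0);      // (index, value) pair, or null if nothing matches
    Console.WriteLine(hit == null ? "none" : $"found {hit.Item2} at index {hit.Item1}");

    foreach (var item in v.EnumerateIndexed(Zeros.AllowSkip))
    {
        Console.WriteLine($"v[{item.Item1}] = {item.Item2}");   // only the non-zero entries
    }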
+ + + + + Returns true if at least one element pairs of two vectors of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two vectors of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Vector containing the same values of . + + This method is included for completeness. + The vector to get the values from. + A vector containing the same values as . + If is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Adds a scalar to each element of a vector. + + The vector to add to. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of a vector. + + The scalar value to add. + The vector to add to. + The result of the addition. + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of a vector. + + The vector to subtract from. + The scalar value to subtract. + The result of the subtraction. + If is . + + + + Substracts each element of a vector from a scalar. + + The scalar value to subtract from. + The vector to subtract. + The result of the subtraction. + If is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a scalar with a vector. + + The scalar to divide. + The vector. + The result of the division. + If is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Pointwise divides two Vectors. + + The vector to divide. + The other vector. + The result of the division. + If and are not the same size. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the vector. + + The dividend we want to compute the remainder of. + The vector whose elements we want to use as divisor. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two vectors. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
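The operator entries above mirror the instance methods: + and - combine vectors or a vector and a scalar, * against a scalar scales, * between two vectors is the dot product, and / and % work elementwise against a scalar. A brief sketch; the % operator is assumed to be present in the referenced 3.x package, per the remainder entries above:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

    var w = 2.0 * (u + v) - 1.0;    // elementwise: [9, 13, 17]
    double dot = u * v;             // dot product: 32
    var halves = v / 2.0;           // [2, 2.5, 3]
    var rem = v % 3.0;              // remainder with the sign of the dividend: [1, 2, 0]

    Console.WriteLine($"{w} {dot} {halves} {rem}");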
+ + + + Computes the sqrt of a vector pointwise + + The input vector + + + + + Computes the exponential of a vector pointwise + + The input vector + + + + + Computes the log of a vector pointwise + + The input vector + + + + + Computes the log10 of a vector pointwise + + The input vector + + + + + Computes the sin of a vector pointwise + + The input vector + + + + + Computes the cos of a vector pointwise + + The input vector + + + + + Computes the tan of a vector pointwise + + The input vector + + + + + Computes the asin of a vector pointwise + + The input vector + + + + + Computes the acos of a vector pointwise + + The input vector + + + + + Computes the atan of a vector pointwise + + The input vector + + + + + Computes the sinh of a vector pointwise + + The input vector + + + + + Computes the cosh of a vector pointwise + + The input vector + + + + + Computes the tanh of a vector pointwise + + The input vector + + + + + Computes the absolute value of a vector pointwise + + The input vector + + + + + Computes the floor of a vector pointwise + + The input vector + + + + + Computes the ceiling of a vector pointwise + + The input vector + + + + + Computes the rounded value of a vector pointwise + + The input vector + + + + + Converts a vector to single precision. + + + + + Converts a vector to double precision. + + + + + Converts a vector to single precision complex numbers. + + + + + Converts a vector to double precision complex numbers. + + + + + Gets a single precision complex vector with the real parts from the given vector. + + + + + Gets a double precision complex vector with the real parts from the given vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response vector Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response matrix Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. 
+ + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. 
+ Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor (independent) + Response (dependent) + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor-Response samples as tuples + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response matrix Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Weighted Linear Regression using normal equations. + + List of sample vectors (predictor) together with their response. + List of weights, one for each sample. + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Locally-Weighted Linear Regression using normal equations. + + + + + Locally-Weighted Linear Regression using normal equations. 
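The regression entries above describe the MultipleRegression solvers (normal equations, QR, SVD) and the simple line fit that returns an (intercept, slope) tuple. A hedged sketch using the Fit and MathNet.Numerics.LinearRegression APIs from the referenced package, with made-up sample data:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearRegression;

    // Simple line fit y = a + b*x, returned as (intercept, slope).
    double[] xs = { 0.0, 1.0, 2.0, 3.0 };
    double[] ys = { 1.1, 2.9, 5.2, 6.8 };
    var line = Fit.Line(xs, ys);
    Console.WriteLine($"a = {line.Item1}, b = {line.Item2}");

    // General least squares X*beta ~ y; QR is slower than the normal equations but more stable.
    var X = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1, 0.0 },
        { 1, 1.0 },
        { 1, 2.0 },
        { 1, 3.0 },
    });
    var y = Vector<double>.Build.Dense(ys);
    Vector<double> beta = MultipleRegression.QR(X, y);
    Console.WriteLine(beta);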
+ + + + + First Order AB method(same as Forward Euler) + + Initial value + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Second Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Third Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Fourth Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + ODE Solver Algorithms + + + + + Second Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Second Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Class to represent a permutation for a subset of the natural numbers. + + + + + Entry _indices[i] represents the location to which i is permuted to. + + + + + Initializes a new instance of the Permutation class. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + + + + Gets the number of elements this permutation is over. + + + + + Computes where permutes too. + + The index to permute from. + The index which is permuted to. + + + + Computes the inverse of the permutation. + + The inverse of the permutation. + + + + Construct an array from a sequence of inversions. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + The set of inversions to construct the permutation from. + A permutation generated from a sequence of inversions. + + + + Construct a sequence of inversions from the permutation. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + A sequence of inversions. + + + + Checks whether the array represents a proper permutation. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + True if represents a proper permutation, false otherwise. + + + + Utilities for working with floating point numbers. + + + + Useful links: + + + http://docs.sun.com/source/806-3568/ncg_goldberg.html#689 - What every computer scientist should know about floating-point arithmetic + + + http://en.wikipedia.org/wiki/Machine_epsilon - Gives the definition of machine epsilon + + + + + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. 
+ The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The relative accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The maximum error in terms of Units in Last Place (ulps), i.e. the maximum number of decimals that may be different. Must be 1 or larger. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. 
+ + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. 
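Stepping back to the ODE-solver entries a little earlier in this file (the Adams-Bashforth and Runge-Kutta methods), a hedged sketch of how the fourth-order Runge-Kutta routine is typically invoked; the MathNet.Numerics.OdeSolvers.RungeKutta.FourthOrder signature shown here is an assumption reconstructed from the documented parameter list (initial value, start time, end time, output size, ODE function):

    using System;
    using MathNet.Numerics.OdeSolvers;

    class OdeSketch
    {
        static void Main()
        {
            // Solve dy/dt = -2*t*y with y(0) = 1 on [0, 2], returning 100 sample points.
            Func<double, double, double> f = (t, y) => -2.0 * t * y;
            double[] approx = RungeKutta.FourthOrder(1.0, 0.0, 2.0, 100, f);

            // The larger the output size N, the finer the returned approximation array.
            Console.WriteLine("y(2) is approximately {0}", approx[approx.Length - 1]);
        }
    }

The AdamsBashforth methods documented above follow the same calling pattern, differing only in the multistep scheme used internally.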
+ + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + The number of binary digits used to represent the binary number for a double precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + The number of binary digits used to represent the binary number for a single precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. 
Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Actual double precision machine epsilon, the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + On a standard machine this is equivalent to `DoublePrecision`. + + + + + Actual double precision machine epsilon, the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + On a standard machine this is equivalent to `PositiveDoublePrecision`. + + + + + The number of significant decimal places of double-precision floating numbers (64 bit). + + + + + The number of significant decimal places of single-precision floating numbers (32 bit). + + + + + Value representing 10 * 2^(-53) = 1.11022302462516E-15 + + + + + Value representing 10 * 2^(-24) = 5.96046447753906E-07 + + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the number divided by it's magnitude, effectively returning a number between -10 and 10. + + The value. + The value of the number. + + + + Returns a 'directional' long value. This is a long value which acts the same as a double, + e.g. a negative double value will return a negative double value starting at 0 and going + more negative as the double value gets more negative. + + The input double value. + A long value which is roughly the equivalent of the double value. + + + + Returns a 'directional' int value. This is a int value which acts the same as a float, + e.g. a negative float value will return a negative int value starting at 0 and going + more negative as the float value gets more negative. + + The input float value. + An int value which is roughly the equivalent of the double value. + + + + Increments a floating point number to the next bigger number representable by the data type. + + The value which needs to be incremented. + How many times the number should be incremented. + + The incrementation step length depends on the provided value. + Increment(double.MaxValue) will return positive infinity. + + The next larger floating point value. + + + + Decrements a floating point number to the next smaller number representable by the data type. + + The value which should be decremented. + How many times the number should be decremented. + + The decrementation step length depends on the provided value. + Decrement(double.MinValue) will return negative infinity. + + The next smaller floating point value. + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + Thrown if is smaller than zero. 
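The epsilon constants and coercion helpers summarised above live on MathNet.Numerics.Precision. A short sketch, assuming the extension-method form of CoerceZero, Increment and Decrement and the MachineEpsilon constant named in the entries above; the exact member shapes are assumptions taken from those summaries rather than verified against this package version:

    using System;
    using MathNet.Numerics;

    class PrecisionSketch
    {
        static void Main()
        {
            // Measured double-precision machine epsilon (unit roundoff), as described above.
            Console.WriteLine(Precision.MachineEpsilon);

            // Force a numerically "almost zero" residual to exactly zero within 1e-12.
            double residual = 1e-17;
            Console.WriteLine(residual.CoerceZero(1e-12));   // prints 0

            // Step to the next representable double above and below 1.0.
            double up = 1.0.Increment(1);
            double down = 1.0.Decrement(1);
            Console.WriteLine(up - down);                    // two representable steps around 1.0
        }
    }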
+ + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. 
See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. 
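For the AlmostEqual overloads summarised above, a small usage sketch assuming the double extension methods on MathNet.Numerics.Precision (default tolerance, maximum absolute error, and decimal-places variants); treat the overload shapes as assumptions consistent with the parameter lists above:

    using System;
    using MathNet.Numerics;

    class AlmostEqualSketch
    {
        static void Main()
        {
            double a = 0.1 + 0.2;   // 0.30000000000000004 in binary floating point
            double b = 0.3;

            Console.WriteLine(a == b);                    // False: exact comparison fails
            Console.WriteLine(a.AlmostEqual(b));          // True: default tolerance
            Console.WriteLine(a.AlmostEqual(b, 1e-10));   // True: maximum absolute error 1e-10
            Console.WriteLine(a.AlmostEqual(b, 10));      // True: equal to 10 decimal places
        }
    }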
+ + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + Thrown if is smaller than zero. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. 
If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + + + Determines the 'number' of floating point numbers between two values (i.e. the number of discrete steps + between the two numbers) and then checks if that is within the specified tolerance. So if a tolerance + of 1 is passed then the result will be true only if the two numbers have the same binary representation + OR if they are two adjacent numbers that only differ by one step. + + + The comparison method used is explained in http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm . 
The article + at http://www.extremeoptimization.com/resources/Articles/FPDotNetConceptsAndFormats.aspx explains how to transform the C code to + .NET enabled code without using pointers and unsafe code. + + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two floats and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. 
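The binary-representation comparison described above counts the number of discrete floating-point values between two doubles and checks that count against a tolerance. A sketch under the assumption that the corresponding extension is named AlmostEqualNumbersBetween, the name used elsewhere in Math.NET Numerics; if this package version exposes a different overload, adjust accordingly:

    using System;
    using MathNet.Numerics;

    class UlpsSketch
    {
        static void Main()
        {
            double a = 1.0;
            // PositiveDoublePrecision (2^-52) is the spacing of doubles just above 1.0,
            // so b is the next representable double after 1.0.
            double b = 1.0 + Precision.PositiveDoublePrecision;

            // Equal if at most one representable double lies between a and b.
            Console.WriteLine(a.AlmostEqualNumbersBetween(b, 1));   // True
        }
    }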
+ + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two vectors and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Support Interface for Precision Operations (like AlmostEquals). + + Type of the implementing class. + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + A norm of this value. + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + The value to compare with. + A norm of the difference between this and the other value. + + + + Consistency vs. performance trade-off between runs on different machines. 
+ + + + Consistent on the same CPU only (maximum performance) + + + Consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility) + + + Consistent on Intel CPUs supporting SSE2 or later + + + Consistent on Intel CPUs supporting SSE4.2 or later + + + Consistent on Intel CPUs supporting AVX or later + + + Consistent on Intel CPUs supporting AVX2 or later + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsFFTProvider" environment variable, + or fall back to the best provider. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + How to transpose a matrix. + + + + + Don't transpose a matrix. + + + + + Transpose a matrix. + + + + + Conjugate transpose a complex matrix. + + If a conjugate transpose is used with a real matrix, then the matrix is just transposed. + + + + Types of matrix norms. + + + + + The 1-norm. + + + + + The Frobenius norm. + + + + + The infinity norm. + + + + + The largest absolute value norm. + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + Supported data types are Double, Single, Complex, and Complex32. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiply elements of vectors or matrices. + + The array x. 
+ The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. 
On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the full QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by QR factor. This is only used for the managed provider and can be + null for the native provider. The native provider uses the Q portion stored in the R matrix. + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + On entry the B matrix; on exit the X matrix. + The number of columns of B. + On exit, the solution matrix. + Rows must be greater or equal to columns. + The type of QR factorization to perform. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
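These provider-level factorization and solve routines back the factorization objects exposed on Matrix<double>; in user code one normally goes through that higher-level API rather than the raw array routines. A sketch assuming the standard MathNet.Numerics.LinearAlgebra factorization API (QR, Svd, Cholesky, Solve):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class SolveSketch
    {
        static void Main()
        {
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1 },
                { 1, 3 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

            // Each factorization object delegates to the provider routines documented here.
            var xQr   = A.QR().Solve(b);        // orthogonal decomposition
            var xSvd  = A.Svd().Solve(b);       // singular value decomposition
            var xChol = A.Cholesky().Solve(b);  // valid because A is symmetric positive definite

            Console.WriteLine(xQr);
            Console.WriteLine(xSvd);
            Console.WriteLine(xChol);
        }
    }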
+ + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsLAProvider" environment variable, + or fall back to the best provider. + + + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. 
+ There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. 
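Which provider implementation services these routines is chosen globally. A sketch assuming the MathNet.Numerics.Control switches (UseManaged, UseNativeMKL); the native switch additionally requires the separate MKL provider package to be deployed, which is an assumption here rather than something this project configures:

    using System;
    using MathNet.Numerics;

    class ProviderSketch
    {
        static void Main()
        {
            // Force the portable managed provider documented above.
            Control.UseManaged();
            Console.WriteLine(Control.LinearAlgebraProvider);

            // Optionally switch to a native provider if the corresponding
            // MathNet.Numerics.MKL.* package is available next to the application:
            // Control.UseNativeMKL();
        }
    }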
+ + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + The B matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. 
+ The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
+ + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + The requested of the matrix. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. 
+ + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
+ The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
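The blocks above cover the Cholesky (POTRF/POTRS), QR (GEQRF/ORGQR), SVD (GESVD) and eigenvalue routines, together with the point-wise array helpers. A short sketch of the corresponding decomposition objects and element-wise operations, again assuming the Math.NET Numerics 3.x Matrix<double>/Vector<double> surface; the test matrix is illustrative and chosen symmetric positive definite so that Cholesky applies:

using MathNet.Numerics.LinearAlgebra;

class DecompositionExample
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0, 0.0 },
            { 1.0, 3.0, 1.0 },
            { 0.0, 1.0, 2.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        var xChol = a.Cholesky().Solve(b); // POTRF + POTRS style
        var xQr   = a.QR().Solve(b);       // QR solve; also usable for m >= n least squares
        var xSvd  = a.Svd().Solve(b);      // SVD-based solve

        var eigenValues = a.Evd().EigenValues; // eigenvalues, reported in ascending order

        // Element-wise operations mirroring the point-wise add/multiply/divide routines above.
        var sum      = xChol + xQr;                  // z = x + y
        var product  = xChol.PointwiseMultiply(xQr); // z = x * y (element-wise)
        var quotient = xChol.PointwiseDivide(xSvd);  // z = x / y (element-wise)

        System.Console.WriteLine($"{sum}\n{product}\n{quotient}\n{eigenValues}");
    }
}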
+ + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. 
Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. 
+ + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. 
+ + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. 
+ The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. 
+ The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. 
The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Random number generator using Mersenne Twister 19937 algorithm. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. 
+ + The seed value. + Uses the value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A 32-bit combined multiple recursive generator with 2 components of order 3. + + Based off of P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research, 44, 5 (1996), 816--822. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Represents a Parallel Additive Lagged Fibonacci pseudo-random number generator. + + + The type bases upon the implementation in the + Boost Random Number Library. + It uses the modulus 232 and by default the "lags" 418 and 1279. Some popular pairs are presented on + Wikipedia - Lagged Fibonacci generator. + + + + + Default value for the ShortLag + + + + + Default value for the LongLag + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. 
Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The ShortLag value + TheLongLag value + + + + Gets the short lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Gets the long lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Stores an array of random numbers + + + + + Stores an index for the random number array element that will be accessed next. + + + + + Fills the array with new unsigned random numbers. + + + Generated random numbers are 32-bit unsigned integers greater than or equal to 0 + and less than or equal to . + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + This class implements extension methods for the System.Random class. The extension methods generate + pseudo-random distributed numbers for types other than double and int32. + + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random bytes. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers greater than or equal to zero and less than . + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers within the specified range. + + The random number generator. + The array to fill with random values. + Lower bound, inclusive. + Upper bound, exclusive. 
+ + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative random number less than . + + The random number generator. + + A 64-bit signed integer greater than or equal to 0, and less than ; that is, + the range of return values includes 0 but not . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int32 range. + + The random number generator. + + A 32-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int64 range. + + The random number generator. + + A 64-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative decimal floating point random number less than 1.0. + + The random number generator. + + A decimal floating point number greater than or equal to 0.0, and less than 1.0; that is, + the range of return values includes 0.0 but not 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random boolean. + + The random number generator. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Provides a time-dependent seed value, matching the default behavior of System.Random. + WARNING: There is no randomness in this seed and quick repeated calls can cause + the same seed value. Do not use for cryptography! + + + + + Provides a seed based on time and unique GUIDs. + WARNING: There is only low randomness in this seed, but at least quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Provides a seed based on an internal random number generator (crypto if available), time and unique GUIDs. + WARNING: There is only medium randomness in this seed, but quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Base class for random number generators. This class introduces a layer between + and the Math.Net Numerics random number generators to provide thread safety. + When used directly it use the System.Random as random number source. + + + + + Initializes a new instance of the class using + the value of to set whether + the instance is thread safe or not. + + + + + Initializes a new instance of the class. + + if set to true , the class is thread safe. + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The array to fill with random values. 
+ + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The size of the array to fill. + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than . + + + + + Returns a random number less then a specified maximum. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + A 32-bit signed integer less than . + is zero or negative. + + + + Returns a random number within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + A 32-bit signed integer greater than or equal to and less than ; that is, the range of return values includes but not . If equals , is returned. + + is greater than . + + + + Fills an array with random 32-bit signed integers greater than or equal to zero and less than . + + The array to fill with random values. + + + + Returns an array with random 32-bit signed integers greater than or equal to zero and less than . + + The size of the array to fill. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an infinite sequence of random 32-bit signed integers greater than or equal to zero and less than . + + + + + Returns an infinite sequence of random numbers within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Fills the elements of a specified array of bytes with random numbers. + + An array of bytes to contain random numbers. + is null. + + + + Returns a random number between 0.0 and 1.0. + + A double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than 2147483647 (). + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random N-bit signed integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 32 (not verified). + + + + + Returns a random N-bit signed long integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 64 (not verified). 
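The generator classes documented above (the multiplicative congruential generators, Mersenne Twister, lagged Fibonacci, Wichmann-Hill and xorshift variants) all share the RandomSource base surface: NextDouble, batch fills, integer ranges and a thread-safe mode. A minimal usage sketch assuming the MathNet.Numerics.Random namespace of version 3.x; the seed and counts are arbitrary illustrations:

using MathNet.Numerics.Random;

class RandomExample
{
    static void Main()
    {
        // Seeded Mersenne Twister; the second argument requests the thread-safe mode.
        var mt = new MersenneTwister(42, true);

        double u = mt.NextDouble();         // uniform in [0, 1)
        double[] batch = mt.NextDoubles(8); // array of uniforms in [0, 1)
        int k = mt.Next(10);                // 32-bit integer in [0, 10)

        // Shared, thread-safe default generator built on System.Random.
        double v = SystemRandomSource.Default.NextDouble();

        System.Console.WriteLine($"{u} {batch.Length} {k} {v}");
    }
}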
+ + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + + + + Construct a new random number generator with random seed. + + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The seed value. + + + + Construct a new random number generator with random seed. + + The seed value. + if set to true , the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fill an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 1982 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: + An efficient and portable pseudo-random number generator". Applied Statistics 31 (1982) 188-190 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. 
+ + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 2006 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers". + Computational Statistics & Data Analysis 51:3 (2006) 1614-1622 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Implements a multiply-with-carry Xorshift pseudo random number generator (RNG) specified in Marsaglia, George. (2003). Xorshift RNGs. + Xn = a * Xn−3 + c mod 2^32 + http://www.jstatsoft.org/v08/i14/paper + + + + + The default value for X1. + + + + + The default value for X2. + + + + + The default value for the multiplier. + + + + + The default value for the carry over. + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Seed or last but three unsigned random number. + + + + + Last but two unsigned random number. + + + + + Last but one unsigned random number. + + + + + The value of the carry over. + + + + + The multiplier. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. 
+ Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Note: must be less than . + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Bisection root-finding algorithm. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. 
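A minimal sketch (not the library's implementation) of the multiply-with-carry recurrence X_n = a·X_{n-3} + c mod 2^32 documented above, using the default constants listed there (a = 916905990, c = 13579, X1 = 77465321, X2 = 362436069). The class and method names, and the way the seed and X1/X2 map onto the three state words, are illustrative assumptions.

using System;

public sealed class MultiplyWithCarrySketch
{
    const ulong A = 916905990;          // multiplier a
    ulong _x, _y, _z;                   // X_{n-3}, X_{n-2}, X_{n-1}
    ulong _c = 13579;                   // carry

    public MultiplyWithCarrySketch(uint seed)
    {
        _x = seed == 0 ? 1u : seed;     // a zero seed is replaced by one, as described above
        _y = 77465321;                  // X1 (assumed placement in the state)
        _z = 362436069;                 // X2 (assumed placement in the state)
    }

    public uint NextUInt32()
    {
        ulong t = A * _x + _c;          // 64-bit product plus carry
        _c = t >> 32;                   // new carry: the high 32 bits
        uint value = (uint)t;           // new sample: the low 32 bits (mod 2^32)
        _x = _y; _y = _z; _z = value;   // shift the state window
        return value;
    }

    // Uniform double in [0, 1): scale by 2^-32.
    public double NextDouble() => NextUInt32() * (1.0 / 4294967296.0);
}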
+ The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy for both the root and the function value at the root. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Algorithm by by Brent, Van Wijngaarden, Dekker et al. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Helper method useful for preventing rounding errors. + a*sign(b) + + + + Algorithm by Broyden. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. 
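A minimal sketch (assumed, not the library code) of the bracketing-and-halving loop that the bisection entries above describe: the interval must bracket a sign change and is repeatedly halved until the requested accuracy or the iteration budget is reached. The names are illustrative.

using System;

static class BisectionSketch
{
    public static bool TryFindRoot(Func<double, double> f, double lowerBound, double upperBound,
                                   double accuracy, int maxIterations, out double root)
    {
        double fLower = f(lowerBound);
        double fUpper = f(upperBound);
        root = double.NaN;
        if (Math.Sign(fLower) == Math.Sign(fUpper))
            return false; // the interval does not bracket a root

        for (int i = 0; i < maxIterations; i++)
        {
            double mid = 0.5 * (lowerBound + upperBound);
            double fMid = f(mid);
            if (Math.Abs(fMid) <= accuracy || 0.5 * (upperBound - lowerBound) <= accuracy)
            {
                root = mid;
                return true;
            }
            if (Math.Sign(fMid) == Math.Sign(fLower)) { lowerBound = mid; fLower = fMid; }
            else { upperBound = mid; }
        }
        return false;
    }
}
// Example: BisectionSketch.TryFindRoot(x => x * x - 2.0, 0.0, 2.0, 1e-10, 100, out double r)
// gives r ≈ 1.4142136.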
+ The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Helper method to calculate an approximation of the Jacobian. + + The function. + The argument (initial guess). + The result (of initial guess). + + + + Finds roots to the cubic equation x^3 + a2*x^2 + a1*x + a0 = 0 + Implements the cubic formula in http://mathworld.wolfram.com/CubicFormula.html + + + + + Q and R are transformed variables. + + + + + n^(1/3) - work around a negative double raised to (1/3) + + + + + Find all real-valued roots of the cubic equation a0 + a1*x + a2*x^2 + x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Pure Newton-Raphson root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Robust Newton-Raphson root-finding algorithm that falls back to bisection when overshooting or converging too slow, or to subdivision on lacking bracketing. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. 
+ Maximum number of iterations. Default 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Default 20. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Example: 20. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Pure Secant root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false + + + Detect a range containing at least one root. + The function to detect roots from. + Lower value of the range. + Upper value of the range + The growing factor of research. Usually 1.6. + Maximum number of iterations. Usually 50. + True if the bracketing operation succeeded, false otherwise. + This iterative methods stops when two values with opposite signs are found. + + + + Sorting algorithms for single, tuple and triple lists. + + + + + Sort a list of keys, in place using the quick sort algorithm using the quick sort algorithm. + + The type of elements in the key list. + List to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. 
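A minimal sketch of the pure Newton-Raphson iteration documented above, x_{k+1} = x_k - f(x_k)/f'(x_k), including the abort when an iterate leaves the bounding interval. Names are illustrative, not the library API.

using System;

static class NewtonRaphsonSketch
{
    public static bool TryFindRoot(Func<double, double> f, Func<double, double> df,
                                   double initialGuess, double lowerBound, double upperBound,
                                   double accuracy, int maxIterations, out double root)
    {
        double x = initialGuess;
        for (int i = 0; i < maxIterations; i++)
        {
            double fx = f(x);
            if (Math.Abs(fx) <= accuracy) { root = x; return true; }
            double dfx = df(x);
            if (dfx == 0.0) break;                        // stationary point: give up
            x -= fx / dfx;                                // Newton step
            if (x < lowerBound || x > upperBound) break;  // left the interval: abort, as described above
        }
        root = double.NaN;
        return false;
    }
}
// Example: NewtonRaphsonSketch.TryFindRoot(x => x * x - 2, x => 2 * x, 1.0, 0.0, 2.0, 1e-10, 100, out var r)
// gives r ≈ 1.4142136.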
+ The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a range of a list of keys, in place using the quick sort algorithm. + + The type of element in the list. + List to sort. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the primary list. + The type of elements in the secondary list. + List to sort. + List to sort on duplicate primary items, and permute the same way as the key list. + Comparison, defining the primary sort order. + Comparison, defining the secondary sort order. + + + + Recursive implementation for an in place quick sort on a list. + + The type of the list on which the quick sort is performed. + The list which is sorted using quick sort. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on a list while reordering one other list accordingly. + + The type of the list on which the quick sort is performed. + The type of the list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on one list while reordering two other lists accordingly. + + The type of the list on which the quick sort is performed. + The type of the first list which is automatically reordered accordingly. + The type of the second list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The first list which is automatically reordered accordingly. + The second list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on the primary and then by the secondary list while reordering one secondary list accordingly. + + The type of the primary list. + The type of the secondary list. + The list which is sorted using quick sort. 
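The sorting entries above describe in-place quick sorts that keep one or two item lists permuted in step with the key list. A minimal sketch of that pattern for a single item list, with illustrative names (not the library code):

using System;
using System.Collections.Generic;

static class KeyItemSortSketch
{
    public static void Sort<TKey, TItem>(IList<TKey> keys, IList<TItem> items, Comparison<TKey> compare)
        => QuickSort(keys, items, compare, 0, keys.Count - 1);

    static void QuickSort<TKey, TItem>(IList<TKey> keys, IList<TItem> items,
                                       Comparison<TKey> compare, int left, int right)
    {
        if (left >= right) return;
        TKey pivot = keys[(left + right) / 2];
        int i = left, j = right;
        while (i <= j)
        {
            while (compare(keys[i], pivot) < 0) i++;
            while (compare(keys[j], pivot) > 0) j--;
            if (i <= j)
            {
                Swap(keys, i, j);   // swap both lists in lock-step so they stay aligned
                Swap(items, i, j);
                i++; j--;
            }
        }
        QuickSort(keys, items, compare, left, j);
        QuickSort(keys, items, compare, i, right);
    }

    static void Swap<T>(IList<T> list, int a, int b)
    {
        T tmp = list[a]; list[a] = list[b]; list[b] = tmp;
    }
}
// Example: KeyItemSortSketch.Sort(new List<int> { 3, 1, 2 }, new List<string> { "c", "a", "b" },
//          (x, y) => x.CompareTo(y)) leaves keys = {1,2,3} and items = {"a","b","c"}.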
+ The list which is sorted secondarily (on primary duplicates) and automatically reordered accordingly. + The method with which to compare two elements of the primary list. + The method with which to compare two elements of the secondary list. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Performs an in place swap of two elements in a list. + + The type of elements stored in the list. + The list in which the elements are stored. + The index of the first element of the swap. + The index of the second element of the swap. + + + + This partial implementation of the SpecialFunctions class contains all methods related to the error function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the harmonic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the logistic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + + + Computes the logarithm of the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The logarithm of the Euler Beta function evaluated at z,w. + If or are not positive. + + + + Computes the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The Euler Beta function evaluated at z,w. + If or are not positive. + + + + Returns the lower incomplete (unregularized) beta function + B(a,b,x) = int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The lower incomplete (unregularized) beta function. + + + + Returns the regularized lower incomplete beta function + I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The regularized lower incomplete beta function. + + + + ************************************** + COEFFICIENTS FOR METHOD ErfImp * + ************************************** + + Polynomial coefficients for a numerator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for adenominator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. 
+ + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + + ************************************** + COEFFICIENTS FOR METHOD ErfInvImp * + ************************************** + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. 
+ + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. + returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! 
using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the factorial of an integer. + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. 
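The factorial notes above recommend working with logarithmic factorials, so that a binomial coefficient can be formed by adding and subtracting logs and exponentiating once, rather than multiplying very large factorials. A minimal sketch of that approach; the names are illustrative and the Stirling-series approximation of ln Γ is an assumption used here only for illustration, not the library's implementation.

using System;

static class BinomialSketch
{
    // ln(n!) via ln Γ(n + 1), using a short Stirling series (illustrative only).
    static double FactorialLn(int n)
    {
        if (n < 2) return 0.0;
        double x = n + 1.0;
        return (x - 0.5) * Math.Log(x) - x + 0.5 * Math.Log(2.0 * Math.PI)
               + 1.0 / (12.0 * x) - 1.0 / (360.0 * x * x * x);
    }

    public static double Binomial(int n, int k)
    {
        if (k < 0 || n < 0 || k > n) return 0.0;
        // Round to counter the small error of the log-space round trip.
        return Math.Floor(0.5 + Math.Exp(FactorialLn(n) - FactorialLn(k) - FactorialLn(n - k)));
    }
}
// Example: Binomial(5, 2) == 10 and Binomial(52, 5) == 2598960.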
+ The upper integral limit. + The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of first kind, + order 1 of the argument. +

+ The function is defined as i1(x) = -i j1( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of the second kind + of order 0 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. 

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 0 of the argument. + + The value to compute the bessel function of. + + + + Returns the modified Bessel function of the second kind + of order 1 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 1 of the argument. +

+ k1e(x) = exp(x) * k1(x). +

+ The value to compute the bessel function of. + +
+ + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = Σ'_{i=0..N-1} coef[i] · T_i(x/2)
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must + have been transformed to x -> 2(2x - b - a)/(b-a) before + entering the routine. This maps x from (a, b) to (-1, 1), + over which the Chebyshev polynomials are defined. +

+ If the coefficients are for the inverted interval, in + which (a, b) is mapped to (1/b, 1/a), the transformation + required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + this becomes x -> 4a/x - 1. +

+ SPEED: +

+ Taking advantage of the recurrence properties of the + Chebyshev polynomials, the routine requires one more + addition per loop than evaluating a nested polynomial of + the same degree. +

+ The coefficients of the polynomial. + Argument to the polynomial. + + Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs +

+ Marked as Deprecated in + http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html + + + +

+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification. + + The no. of terms in the sequence. + The coefficients of the Chebyshev series, length n+1. + The value at which the series is to be evaluated. + + ORIGINAL AUTHOR: + Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics, University of Paisley; High St., PAISLEY, SCOTLAND + REFERENCES: + "An error analysis of the modified Clenshaw method for evaluating Chebyshev and Fourier series" + J. Oliver, J.I.M.A., vol. 20, 1977, pp379-391 + +
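A minimal sketch of the reverse-ordered Chebyshev series evaluation described above (Cephes-style: zero-order coefficient last, series in T_i(x/2), Clenshaw-type recurrence). The names are illustrative, not the library API.

using System;

static class ChebyshevSketch
{
    public static double Evaluate(double x, double[] coefficients)
    {
        double b0 = coefficients[0], b1 = 0.0, b2 = 0.0;
        for (int i = 1; i < coefficients.Length; i++)
        {
            b2 = b1;
            b1 = b0;
            b0 = x * b1 - b2 + coefficients[i]; // T recurrence at x/2, since 2·(x/2) = x
        }
        return 0.5 * (b0 - b2); // the primed sum: the zero-order term enters with weight 1/2
    }
}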
+ + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. 
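A minimal sketch of the two-dimensional Rosenbrock test function listed above, f(x, y) = (1 - x)^2 + 100·(y - x^2)^2 with global minimum f(1, 1) = 0. The class and method names are illustrative.

static class TestFunctionsSketch
{
    public static double Rosenbrock(double x, double y)
    {
        double a = 1.0 - x;
        double b = y - x * x;
        return a * a + 100.0 * b * b;
    }
}
// Example: Rosenbrock(1.0, 1.0) == 0.0 and Rosenbrock(0.0, 0.0) == 1.0.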
+ Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
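A minimal sketch of the mean and the unbiased (N−1 normalized) variance estimators described above, returning NaN for fewer than two samples as documented. Names are illustrative, not the library API.

using System;

static class ArrayStatisticsSketch
{
    public static double Mean(double[] data)
    {
        if (data.Length == 0) return double.NaN;
        double sum = 0.0;
        foreach (double d in data) sum += d;
        return sum / data.Length;
    }

    public static double Variance(double[] data)
    {
        if (data.Length < 2) return double.NaN;
        double mean = Mean(data);
        double sumSq = 0.0;
        foreach (double d in data)
        {
            double diff = d - mean;
            sumSq += diff * diff;
        }
        return sumSq / (data.Length - 1); // Bessel's correction
    }

    public static double StandardDeviation(double[] data) => Math.Sqrt(Variance(data));
}
// Example: Variance(new[] { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 }) == 32.0 / 7.0.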
+ One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
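A minimal sketch of the R-8 quantile definition described above: h = (N + 1/3)·tau + 1/3 with linear interpolation between neighbouring order statistics and the stated x1/xN cut-offs. Unlike the in-place library routines, this illustrative version sorts a copy; the names are assumptions.

using System;

static class QuantileSketch
{
    public static double QuantileR8(double[] data, double tau)
    {
        if (data.Length == 0 || tau < 0.0 || tau > 1.0) return double.NaN;
        double[] sorted = (double[])data.Clone();
        Array.Sort(sorted);

        int n = sorted.Length;
        if (tau < (2.0 / 3.0) / (n + 1.0 / 3.0)) return sorted[0];          // use x1
        if (tau >= (n - 1.0 / 3.0) / (n + 1.0 / 3.0)) return sorted[n - 1]; // use xN

        double h = (n + 1.0 / 3.0) * tau + 1.0 / 3.0; // one-based fractional position
        int k = (int)Math.Floor(h);
        return sorted[k - 1] + (h - k) * (sorted[k] - sorted[k - 1]);
    }
}
// Example: QuantileR8(new[] { 1.0, 2.0, 3.0, 4.0 }, 0.5) == 2.5 (the median).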
+ Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. 
+ On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
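Editor's note: the R-8 (SciPy (1/3,1/3)) definition quoted above interpolates between order statistics at the fractional index h = (N + 1/3)·tau + 1/3, clamping to x1 and xN at the stated boundaries. A rough sketch of that rule over a pre-sorted copy of the data (my own helper, not the library's in-place routine):

```csharp
using System;

static class QuantileR8Sketch
{
    // tau-th quantile (R-8 definition) from data sorted ascending; tau in [0, 1].
    public static double QuantileR8(double[] sorted, double tau)
    {
        int n = sorted.Length;
        if (n == 0) return double.NaN;

        double h = (n + 1.0 / 3.0) * tau + 1.0 / 3.0;  // fractional one-based index
        if (h <= 1.0) return sorted[0];                // tau < (2/3)/(N + 1/3)  => x1
        if (h >= n) return sorted[n - 1];              // tau >= (N - 1/3)/(N + 1/3) => xN

        int k = (int)Math.Floor(h);                    // lower order statistic (1-based)
        double frac = h - k;
        return sorted[k - 1] + frac * (sorted[k] - sorted[k - 1]);
    }
}
```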
+ Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. 
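Editor's note: the Correlation entries above compute Pearson product-moment and Spearman ranked coefficients. Assuming the MathNet.Numerics package this diff references, usage would look roughly like the sketch below; the sample data is invented for illustration.

```csharp
using System;
using MathNet.Numerics.Statistics;

class CorrelationExample
{
    static void Main()
    {
        double[] a = { 1.0, 2.0, 3.0, 4.0, 5.0 };
        double[] b = { 2.1, 3.9, 6.2, 8.1, 9.8 };

        // Pearson product-moment and Spearman ranked correlation coefficients.
        double pearson = Correlation.Pearson(a, b);
        double spearman = Correlation.Spearman(a, b);

        Console.WriteLine($"Pearson:  {pearson:F4}");
        Console.WriteLine($"Spearman: {spearman:F4}");
    }
}
```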
+ + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. 
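Editor's note: the DescriptiveStatistics class documented above collects count, mean, variance, standard deviation, skewness, kurtosis and the extrema in a single pass over the data. A hedged usage sketch (again assuming the MathNet.Numerics reference; the sample values are illustrative):

```csharp
using System;
using MathNet.Numerics.Statistics;

class DescriptiveStatisticsExample
{
    static void Main()
    {
        double[] samples = { 3.1, 4.7, 2.8, 5.5, 4.1, 3.9 };

        var stats = new DescriptiveStatistics(samples);

        Console.WriteLine($"N        = {stats.Count}");
        Console.WriteLine($"Mean     = {stats.Mean:F4}");
        Console.WriteLine($"Variance = {stats.Variance:F4}");   // N-1 normalizer
        Console.WriteLine($"StdDev   = {stats.StandardDeviation:F4}");
        Console.WriteLine($"Skewness = {stats.Skewness:F4}");
        Console.WriteLine($"Kurtosis = {stats.Kurtosis:F4}");
        Console.WriteLine($"Min/Max  = {stats.Minimum}/{stats.Maximum}");
    }
}
```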
+ + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. + + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. 
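Editor's note: for the Histogram/Bucket API described above, a bucketed view of a sample could be built as in the sketch below. The bucket count and data are arbitrary; each bucket covers a (lower, upper] range and reports how many datapoints fell into it.

```csharp
using System;
using MathNet.Numerics.Statistics;

class HistogramExample
{
    static void Main()
    {
        double[] data = { 0.2, 0.4, 0.4, 1.1, 1.7, 2.3, 2.9, 3.4, 3.6, 4.8 };

        // Five equally sized buckets spanning the smallest to the largest datapoint.
        var histogram = new Histogram(data, 5);

        for (int i = 0; i < histogram.BucketCount; i++)
        {
            Bucket bucket = histogram[i];
            Console.WriteLine($"({bucket.LowerBound:F2}, {bucket.UpperBound:F2}] : {bucket.Count}");
        }
    }
}
```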
+ + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. + When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. 
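Editor's note: both hybrid Monte Carlo samplers fall back to a "simple three point estimation" for numerical differentiation. The central-difference sketch below captures that idea; the step size and helper name are my own choices, not the library's defaults.

```csharp
using System;

static class ThreePointDerivativeSketch
{
    // Central three-point estimate of f'(x): (f(x + h) - f(x - h)) / (2h).
    public static double Derivative(Func<double, double> f, double x, double h = 1e-5)
        => (f(x + h) - f(x - h)) / (2.0 * h);
}

// Example: the derivative of the standard normal log density -x^2/2 at x = 1.5 is about -1.5:
// double slope = ThreePointDerivativeSketch.Derivative(x => -0.5 * x * x, 1.5);
```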
+ + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. + + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. 
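Editor's note: the Hamiltonian described above is H = E + p·p/2, where E is the negative log density (potential energy) and p the randomized momentum; proposals are generated by repeated frog-leap (leapfrog) steps and accepted based on the change in H. The univariate sketch below is a generic illustration of that scheme, not the library's implementation.

```csharp
using System;

static class LeapfrogSketch
{
    // One frog-leap (leapfrog) step for a univariate state x with momentum p,
    // where gradE is the derivative of the potential energy E(x) = -log P(x).
    public static void Step(Func<double, double> gradE, ref double x, ref double p, double stepSize)
    {
        p -= 0.5 * stepSize * gradE(x);   // half-step in momentum
        x += stepSize * p;                // full step in position
        p -= 0.5 * stepSize * gradE(x);   // second half-step in momentum
    }

    // Hamiltonian H = E + p*p/2; a proposal is accepted with probability
    // min(1, exp(H_old - H_new)).
    public static double Hamiltonian(Func<double, double> energy, double x, double p)
        => energy(x) + 0.5 * p * p;
}
```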
+ The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. + + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. 
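Editor's note: the Metropolis-Hastings sampler described above works entirely in log space and only needs the log target density plus the log transition kernel of an (possibly asymmetric) proposal. The sketch below shows one generic accept/reject transition for a univariate state; it is not the library's sampler, and all delegate names are mine.

```csharp
using System;

static class MetropolisHastingsSketch
{
    // One Metropolis-Hastings transition, all densities in log space.
    // logP: log target density; logQ(to, from): log proposal kernel q(to | from);
    // propose: draws a candidate given the current state.
    public static double Transition(
        Func<double, double> logP,
        Func<double, double, double> logQ,
        Func<double, Random, double> propose,
        double current, Random rng)
    {
        double candidate = propose(current, rng);
        double logAcceptance = logP(candidate) - logP(current)
                             + logQ(current, candidate) - logQ(candidate, current);
        return Math.Log(rng.NextDouble()) < Math.Min(0.0, logAcceptance) ? candidate : current;
    }
}
```

For a symmetric proposal the two logQ terms cancel, which is exactly the plain Metropolis case documented below.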
+ The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. 
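Editor's note: the rejection sampler described above draws candidates from the proposal Q and accepts each with probability P(x)/Q(x), relying on P(x) < Q(x) everywhere (neither density needs to be normalized). A generic sketch of that loop, not the library's class:

```csharp
using System;

static class RejectionSamplingSketch
{
    // Draw one sample from the target P using proposal Q, assuming p(x) < q(x) for all x.
    public static double Sample(
        Func<double, double> p,            // unnormalized target density
        Func<double, double> q,            // unnormalized proposal density
        Func<Random, double> sampleFromQ,  // draws a candidate from the proposal distribution
        Random rng)
    {
        while (true)
        {
            double x = sampleFromQ(rng);
            double ratio = p(x) / q(x);
            if (ratio > 1.0)
                throw new InvalidOperationException("Proposal does not upper-bound the target.");
            if (rng.NextDouble() < ratio)
                return x;
        }
    }
}
```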
+ The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. 
+ The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. 
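Editor's note: the RunningStatistics accumulator documented above is updated in place with Push/PushRange and, per the entries that follow, two accumulators over disjoint samples can be merged. A hedged usage sketch, assuming the MathNet.Numerics reference and its static Combine method:

```csharp
using System;
using MathNet.Numerics.Statistics;

class RunningStatisticsExample
{
    static void Main()
    {
        var left = new RunningStatistics(new[] { 1.0, 2.0, 3.0 });

        var right = new RunningStatistics();
        right.PushRange(new[] { 4.0, 5.0, 6.0 });

        // Merge the two accumulators into statistics over the combined samples.
        var combined = RunningStatistics.Combine(left, right);

        Console.WriteLine($"N = {combined.Count}, Mean = {combined.Mean}");
        Console.WriteLine($"Sample StdDev = {combined.StandardDeviation:F4}");  // N-1 normalizer
    }
}
```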
+ + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. 
+ + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
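Editor's note: the empirical CDF entry above estimates, for an ascending-sorted sample, the fraction of values at or below x. The self-contained sketch below illustrates that estimate with a binary search; the helper name is mine and the exact tie-handling of the library's definition may differ.

```csharp
using System;

static class EmpiricalCdfSketch
{
    // Empirical CDF at x from data sorted ascending: fraction of entries <= x.
    public static double EmpiricalCdf(double[] sorted, double x)
    {
        if (sorted.Length == 0) return double.NaN;

        int idx = Array.BinarySearch(sorted, x);
        if (idx < 0)
        {
            idx = ~idx;  // insertion point = number of entries strictly below x
        }
        else
        {
            // Step past duplicates so all ties at x are counted.
            while (idx < sorted.Length && sorted[idx] == x) idx++;
        }
        return (double)idx / sorted.Length;
    }
}
```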
+ + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. 
+ Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. 
+ Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + The full population data. + + + + Evaluates the skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + The full population data. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the unbiased population skewness and kurtosis from the provided samples in a single pass. + Uses a normalizer (Bessel's correction; type 2). + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness and kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + + The full population data. 
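Editor's note: the extension methods documented in this block hang directly off IEnumerable&lt;double&gt;; with `using MathNet.Numerics.Statistics;` in scope they read as below. The data is illustrative, and the R8 remark in the comment simply restates the quantile definition used throughout these entries.

```csharp
using System;
using System.Collections.Generic;
using MathNet.Numerics.Statistics;

class StatisticsExtensionsExample
{
    static void Main()
    {
        IEnumerable<double> data = new[] { 12.5, 9.8, 14.2, 11.1, 10.4, 13.7 };

        Console.WriteLine($"Mean          = {data.Mean():F4}");
        Console.WriteLine($"Sample StdDev = {data.StandardDeviation():F4}");            // N-1 normalizer
        Console.WriteLine($"Pop. StdDev   = {data.PopulationStandardDeviation():F4}");  // N normalizer
        Console.WriteLine($"Median (R8)   = {data.Median():F4}");
        Console.WriteLine($"90% quantile  = {data.Quantile(0.9):F4}");
    }
}
```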
+ + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + The full population data. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. 
+ The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. 
+ Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + Null-entries are ignored. + + The data sample sequence. + + + + Evaluates the sample mean over a moving window, for each samples. + Returns NaN if no data is empty or if any entry is NaN. + + The sample stream to calculate the mean of. + The number of last samples to consider. + + + + Statistics operating on an IEnumerable in a single pass, without keeping the full data in memory. + Can be used in a streaming way, e.g. on large datasets not fitting into memory. + + + + + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. 
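The single-pass descriptions in this block map onto the static StreamingStatistics class, which evaluates each statistic while enumerating the sequence exactly once and without buffering it. A minimal sketch, assuming the static method names match the summaries above (Mean, StandardDeviation, PopulationStandardDeviation); the generated stream is purely illustrative.

    using System;
    using System.Collections.Generic;
    using MathNet.Numerics.Statistics;

    class StreamingStatisticsSketch
    {
        // Simulates a sample stream too large to keep in memory.
        static IEnumerable<double> Stream()
        {
            var rng = new Random(42);
            for (int i = 0; i < 1000000; i++)
            {
                yield return rng.NextDouble();
            }
        }

        static void Main()
        {
            // Each call enumerates the stream once, in a single pass, without memoization.
            double mean = StreamingStatistics.Mean(Stream());
            double sd = StreamingStatistics.StandardDeviation(Stream());              // N-1 normalizer
            double popSd = StreamingStatistics.PopulationStandardDeviation(Stream()); // N normalizer

            Console.WriteLine("mean={0:F4}, sd={1:F4}, popSd={2:F4}", mean, sd, popSd);
        }
    }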
+ + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Calculates the entropy of a stream of double values. + Returns NaN if any of the values in the stream are NaN. + + The input stream to evaluate. + + + + + Used to simplify parallel code, particularly between the .NET 4.0 and Silverlight Code. + + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The body to be invoked for each iteration range. + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The partition size for splitting work into smaller pieces. + The body to be invoked for each iteration range. + + + + Executes each of the provided actions inside a discrete, asynchronous task. + + An array of actions to execute. + The actions array contains a null element. + At least one invocation of the actions threw an exception. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. 
+ The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Double-precision trigonometry toolkit. + + + + + Constant to convert a degree to grad. + + + + + Converts a degree (360-periodic) angle to a grad (400-periodic) angle. + + The degree to convert. + The converted grad angle. + + + + Converts a degree (360-periodic) angle to a radian (2*Pi-periodic) angle. + + The degree to convert. + The converted radian angle. + + + + Converts a grad (400-periodic) angle to a degree (360-periodic) angle. + + The grad to convert. + The converted degree. + + + + Converts a grad (400-periodic) angle to a radian (2*Pi-periodic) angle. + + The grad to convert. + The converted radian. + + + + Converts a radian (2*Pi-periodic) angle to a degree (360-periodic) angle. + + The radian to convert. + The converted degree. + + + + Converts a radian (2*Pi-periodic) angle to a grad (400-periodic) angle. + + The radian to convert. + The converted grad. + + + + Normalized Sinc function. sinc(x) = sin(pi*x)/(pi*x). + + + + + Trigonometric Sine of an angle in radian, or opposite / hypotenuse. + + The angle in radian. + The sine of the radian angle. + + + + Trigonometric Sine of a Complex number. + + The complex value. + The sine of the complex number. + + + + Trigonometric Cosine of an angle in radian, or adjacent / hypotenuse. + + The angle in radian. + The cosine of an angle in radian. + + + + Trigonometric Cosine of a Complex number. + + The complex value. + The cosine of a complex number. + + + + Trigonometric Tangent of an angle in radian, or opposite / adjacent. + + The angle in radian. + The tangent of the radian angle. + + + + Trigonometric Tangent of a Complex number. + + The complex value. + The tangent of the complex number. + + + + Trigonometric Cotangent of an angle in radian, or adjacent / opposite. Reciprocal of the tangent. + + The angle in radian. + The cotangent of an angle in radian. + + + + Trigonometric Cotangent of a Complex number. + + The complex value. + The cotangent of the complex number. + + + + Trigonometric Secant of an angle in radian, or hypotenuse / adjacent. Reciprocal of the cosine. + + The angle in radian. + The secant of the radian angle. + + + + Trigonometric Secant of a Complex number. + + The complex value. + The secant of the complex number. + + + + Trigonometric Cosecant of an angle in radian, or hypotenuse / opposite. Reciprocal of the sine. + + The angle in radian. + Cosecant of an angle in radian. + + + + Trigonometric Cosecant of a Complex number. + + The complex value. + The cosecant of a complex number. + + + + Trigonometric principal Arc Sine in radian + + The opposite for a unit hypotenuse (i.e. opposite / hyptenuse). + The angle in radian. + + + + Trigonometric principal Arc Sine of this Complex number. + + The complex value. + The arc sine of a complex number. + + + + Trigonometric principal Arc Cosine in radian + + The adjacent for a unit hypotenuse (i.e. adjacent / hypotenuse). 
+ The angle in radian. + + + + Trigonometric principal Arc Cosine of this Complex number. + + The complex value. + The arc cosine of a complex number. + + + + Trigonometric principal Arc Tangent in radian + + The opposite for a unit adjacent (i.e. opposite / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Tangent of this Complex number. + + The complex value. + The arc tangent of a complex number. + + + + Trigonometric principal Arc Cotangent in radian + + The adjacent for a unit opposite (i.e. adjacent / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cotangent of this Complex number. + + The complex value. + The arc cotangent of a complex number. + + + + Trigonometric principal Arc Secant in radian + + The hypotenuse for a unit adjacent (i.e. hypotenuse / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Secant of this Complex number. + + The complex value. + The arc secant of a complex number. + + + + Trigonometric principal Arc Cosecant in radian + + The hypotenuse for a unit opposite (i.e. hypotenuse / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cosecant of this Complex number. + + The complex value. + The arc cosecant of a complex number. + + + + Hyperbolic Sine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic sine of the angle. + + + + Hyperbolic Sine of a Complex number. + + The complex value. + The hyperbolic sine of a complex number. + + + + Hyperbolic Cosine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic Cosine of the angle. + + + + Hyperbolic Cosine of a Complex number. + + The complex value. + The hyperbolic cosine of a complex number. + + + + Hyperbolic Tangent in radian + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic tangent of the angle. + + + + Hyperbolic Tangent of a Complex number. + + The complex value. + The hyperbolic tangent of a complex number. + + + + Hyperbolic Cotangent + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cotangent of the angle. + + + + Hyperbolic Cotangent of a Complex number. + + The complex value. + The hyperbolic cotangent of a complex number. + + + + Hyperbolic Secant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic secant of the angle. + + + + Hyperbolic Secant of a Complex number. + + The complex value. + The hyperbolic secant of a complex number. + + + + Hyperbolic Cosecant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cosecant of the angle. + + + + Hyperbolic Cosecant of a Complex number. + + The complex value. + The hyperbolic cosecant of a complex number. + + + + Hyperbolic Area Sine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Sine of this Complex number. + + The complex value. + The hyperbolic arc sine of a complex number. + + + + Hyperbolic Area Cosine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosine of this Complex number. + + The complex value. + The hyperbolic arc cosine of a complex number. + + + + Hyperbolic Area Tangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Tangent of this Complex number. + + The complex value. + The hyperbolic arc tangent of a complex number. + + + + Hyperbolic Area Cotangent + + The real value. + The hyperbolic angle, i.e. 
the area of its hyperbolic sector. + + + + Hyperbolic Area Cotangent of this Complex number. + + The complex value. + The hyperbolic arc cotangent of a complex number. + + + + Hyperbolic Area Secant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Secant of this Complex number. + + The complex value. + The hyperbolic arc secant of a complex number. + + + + Hyperbolic Area Cosecant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosecant of this Complex number. + + The complex value. + The hyperbolic arc cosecant of a complex number. + + + + Hamming window. Named after Richard Hamming. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hamming window. Named after Richard Hamming. + Periodic version, useful e.g. for FFT purposes. + + + + + Hann window. Named after Julius von Hann. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hann window. Named after Julius von Hann. + Periodic version, useful e.g. for FFT purposes. + + + + + Cosine window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Cosine window. + Periodic version, useful e.g. for FFT purposes. + + + + + Lanczos window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Lanczos window. + Periodic version, useful e.g. for FFT purposes. + + + + + Gauss window. + + + + + Blackman window. + + + + + Blackman-Harris window. + + + + + Blackman-Nuttall window. + + + + + Bartlett window. + + + + + Bartlett-Hann window. + + + + + Nuttall window. + + + + + Flat top window. + + + + + Uniform rectangular (dirichlet) window. + + + + + Triangular window. + + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized string similar to The accuracy couldn't be reached with the specified number of iterations.. + + + + + Looks up a localized string similar to The array arguments must have the same length.. + + + + + Looks up a localized string similar to The given array has the wrong length. Should be {0}.. + + + + + Looks up a localized string similar to The argument must be between 0 and 1.. + + + + + Looks up a localized string similar to Value cannot be in the range -1 < x < 1.. + + + + + Looks up a localized string similar to Value must be even.. + + + + + Looks up a localized string similar to The histogram does not contain the value.. + + + + + Looks up a localized string similar to Value is expected to be between {0} and {1} (including {0} and {1}).. + + + + + Looks up a localized string similar to At least one item of {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be greater than or equal to one.. + + + + + Looks up a localized string similar to Matrix dimensions must agree.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: {0}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}, op3 is {2}.. + + + + + Looks up a localized string similar to The requested matrix does not exist.. 
+ + + + + Looks up a localized string similar to The matrix indices must not be out of range of the given matrix.. + + + + + Looks up a localized string similar to Matrix must not be rank deficient.. + + + + + Looks up a localized string similar to Matrix must not be singular.. + + + + + Looks up a localized string similar to Matrix must be positive definite.. + + + + + Looks up a localized string similar to Matrix column dimensions must agree.. + + + + + Looks up a localized string similar to Matrix row dimensions must agree.. + + + + + Looks up a localized string similar to Matrix must have exactly one column.. + + + + + Looks up a localized string similar to Matrix must have exactly one column and row, thus have only one cell.. + + + + + Looks up a localized string similar to Matrix must have exactly one row.. + + + + + Looks up a localized string similar to Matrix must be square.. + + + + + Looks up a localized string similar to Matrix must be symmetric.. + + + + + Looks up a localized string similar to Matrix must be symmetric positive definite.. + + + + + Looks up a localized string similar to In the specified range, the exclusive maximum must be greater than the inclusive minimum.. + + + + + Looks up a localized string similar to In the specified range, the minimum is greater than maximum.. + + + + + Looks up a localized string similar to Value must be positive.. + + + + + Looks up a localized string similar to Value must neither be infinite nor NaN.. + + + + + Looks up a localized string similar to Value must not be negative (zero is ok).. + + + + + Looks up a localized string similar to {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be odd.. + + + + + Looks up a localized string similar to {0} must be greater than {1}.. + + + + + Looks up a localized string similar to {0} must be greater than or equal to {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than or equal to {1}.. + + + + + Looks up a localized string similar to The chosen parameter set is invalid (probably some value is out of range).. + + + + + Looks up a localized string similar to The given expression does not represent a complex number.. + + + + + Looks up a localized string similar to Value must be positive (and not zero).. + + + + + Looks up a localized string similar to Size must be a Power of Two.. + + + + + Looks up a localized string similar to Size must be a Power of Two in every dimension.. + + + + + Looks up a localized string similar to The range between {0} and {1} must be less than or equal to {2}.. + + + + + Looks up a localized string similar to Arguments must be different objects.. + + + + + Looks up a localized string similar to Array must have exactly one dimension (and not be null).. + + + + + Looks up a localized string similar to Value is too large.. + + + + + Looks up a localized string similar to Value is too large for the current iteration limit.. + + + + + Looks up a localized string similar to Type mismatch.. + + + + + Looks up a localized string similar to The upper bound must be strictly larger than the lower bound.. + + + + + Looks up a localized string similar to The upper bound must be at least as large as the lower bound.. + + + + + Looks up a localized string similar to Array length must be a multiple of {0}.. + + + + + Looks up a localized string similar to All vectors must have the same dimensionality.. 
+ + + + + Looks up a localized string similar to The vector must have 3 dimensions.. + + + + + Looks up a localized string similar to The given array is too small. It must be at least {0} long.. + + + + + Looks up a localized string similar to Big endian files are not supported.. + + + + + Looks up a localized string similar to The supplied collection is empty.. + + + + + Looks up a localized string similar to Complex matrices are not supported.. + + + + + Looks up a localized string similar to An algorithm failed to converge.. + + + + + Looks up a localized string similar to The sample size must be larger than the given degrees of freedom.. + + + + + Looks up a localized string similar to This feature is not implemented yet (but is planned).. + + + + + Looks up a localized string similar to The given file doesn't exist.. + + + + + Looks up a localized string similar to Sample points should be sorted in strictly ascending order. + + + + + Looks up a localized string similar to All sample points should be unique.. + + + + + Looks up a localized string similar to Invalid parameterization for the distribution.. + + + + + Looks up a localized string similar to Invalid Left Boundary Condition.. + + + + + Looks up a localized string similar to The operation could not be performed because the accumulator is empty.. + + + + + Looks up a localized string similar to The operation could not be performed because the histogram is empty.. + + + + + Looks up a localized string similar to Not enough points in the distribution.. + + + + + Looks up a localized string similar to No Samples Provided. Preparation Required.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method, parameter number : {0}. + + + + + Looks up a localized string similar to Invalid Right Boundary Condition.. + + + + + Looks up a localized string similar to Lag must be positive. + + + + + Looks up a localized string similar to Lag must be smaller than the sample size. + + + + + Looks up a localized string similar to ddd MMM dd HH:mm:ss yyyy. + + + + + Looks up a localized string similar to Matrices can not be empty and must have at least one row and column.. + + + + + Looks up a localized string similar to The number of columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Matrix must be in sparse storage format. + + + + + Looks up a localized string similar to The number of rows of a matrix must be positive.. + + + + + Looks up a localized string similar to The number of rows or columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Unable to allocate native memory.. + + + + + Looks up a localized string similar to Only 1 and 2 dimensional arrays are supported.. + + + + + Looks up a localized string similar to Data must contain at least {0} values.. + + + + + Looks up a localized string similar to Name cannot contain a space. name: {0}. + + + + + Looks up a localized string similar to {0} is not a supported type.. + + + + + Looks up a localized string similar to Algorithm experience a numerical break down + . + + + + + Looks up a localized string similar to The two arguments can't be compared (maybe they are part of a partial ordering?). + + + + + Looks up a localized string similar to The integer array does not represent a valid permutation.. 
+ + + + + Looks up a localized string similar to The sampler's proposal distribution is not upper bounding the target density.. + + + + + Looks up a localized string similar to A regression of the requested order requires at least {0} samples. Only {1} samples have been provided. . + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds.. + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds. Consider to use RobustNewtonRaphson instead.. + + + + + Looks up a localized string similar to The lower and upper bounds must bracket a single root.. + + + + + Looks up a localized string similar to The algorithm ended without root in the range.. + + + + + Looks up a localized string similar to The number of rows must greater than or equal to the number of columns.. + + + + + Looks up a localized string similar to All sample vectors must have the same length. However, vectors with disagreeing length {0} and {1} have been provided. A sample with index i is given by the value at index i of each provided vector.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed. The {0}-th diagonal element of the factor U is zero.. + + + + + Looks up a localized string similar to The singular vectors were not computed.. + + + + + Looks up a localized string similar to This special case is not supported yet (but is planned).. + + + + + Looks up a localized string similar to The given stop criterion already exist in the collection.. + + + + + Looks up a localized string similar to There is no stop criterion in the collection.. + + + + + Looks up a localized string similar to String parameter cannot be empty or null.. + + + + + Looks up a localized string similar to We only support sparse matrix with less than int.MaxValue elements.. + + + + + Looks up a localized string similar to The moment of the distribution is undefined.. + + + + + Looks up a localized string similar to A user defined provider has not been specified.. + + + + + Looks up a localized string similar to User work buffers are not supported by this provider.. + + + + + Looks up a localized string similar to Vectors can not be empty and must have at least one element.. + + + + + Looks up a localized string similar to The given work array is too small. Check work[0] for the corret size.. + + +
+
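The trigonometry toolkit and the window functions documented earlier in this file can be exercised as in the sketch below. This assumes the members live on the static Trig and Window classes of MathNet.Numerics 3.16.0 and that the window generators return a double array of the requested width; the width of 8 and the angle values are arbitrary.

    using System;
    using MathNet.Numerics;

    class TrigAndWindowSketch
    {
        static void Main()
        {
            // Angle unit conversions: 360 degrees = 400 grad = 2*Pi radians.
            double rad = Trig.DegreeToRadian(90.0);  // ~ Pi/2
            double grad = Trig.DegreeToGrad(90.0);   // 100 grad
            Console.WriteLine("90 deg = {0} rad = {1} grad", rad, grad);

            // Symmetric Hamming window (filter design) vs. the periodic variant (FFT use).
            double[] symmetric = Window.Hamming(8);
            double[] periodic = Window.HammingPeriodic(8);
            Console.WriteLine("symmetric[0]={0}, periodic[0]={1}", symmetric[0], periodic[0]);
        }
    }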
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll new file mode 100644 index 0000000..1be500c Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll differ diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML new file mode 100644 index 0000000..9c9b21f --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML @@ -0,0 +1,49706 @@ + + + + MathNet.Numerics + + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. + Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. 
Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. + + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. 
+ An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. + + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. 
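The Complex32 members just described (Cartesian and polar construction, magnitude, phase, conjugation, the special constants) might be used as follows; a sketch that only calls members named in the documentation above, with arbitrary values.

    using System;
    using MathNet.Numerics;

    class Complex32BasicsSketch
    {
        static void Main()
        {
            // Cartesian and polar construction, as in the class summary above.
            var x = new Complex32(1f, 2f);
            var y = Complex32.FromPolarCoordinates(1f, (float)Math.PI);

            Console.WriteLine("x: Re={0}, Im={1}, |x|={2}, arg(x)={3}",
                x.Real, x.Imaginary, x.Magnitude, x.Phase);

            // Conjugate flips the sign of the imaginary component; ImaginaryOne is the unit i.
            Complex32 conj = x.Conjugate();
            Complex32 i = Complex32.ImaginaryOne;
            Console.WriteLine("conj(x)={0}, i*i={1}", conj, i * i);
        }
    }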
+ + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. + + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. 
+ The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. + + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. + + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. 
+ + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). 
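The parsing, conversion and static arithmetic members described in this stretch might be used as below. This is a hedged sketch: `Complex32.Parse`/`TryParse`, the implicit `float` conversion and `ToComplex()` are assumed to have these shapes in the packaged MathNet.Numerics build.

```csharp
// Hedged sketch of Complex32 parsing and conversions; member shapes are assumptions.
using System;
using MathNet.Numerics;

class Complex32ParseDemo
{
    static void Main()
    {
        // Accepted formats include 'n', 'ni' and 'n + ni', e.g. "3 + 2i".
        Complex32 a = Complex32.Parse("3 + 2i");

        Complex32 b;
        if (Complex32.TryParse("(1, 2i)", out b))                 // returns false instead of throwing
        {
            Complex32 sum = Complex32.Add(a, b);                  // same result as a + b
            System.Numerics.Complex widened = sum.ToComplex();    // widen to 64-bit Complex
            Console.WriteLine(widened);
        }

        Complex32 fromFloat = 2.5f;                               // implicit conversion from float
        Console.WriteLine(Complex32.Conjugate(fromFloat));        // static conjugate helper
    }
}
```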
+ + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + 64-bit double precision complex numbers class. + + + + The class Complex provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex structures + has two special constant values and + . + + + + Complex x = new Complex(1d, 2d); + Complex y = Complex.FromPolarCoordinates(1d, Math.Pi); + Complex z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. 
+ + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new Complex instance + with real and imaginary numbers positive infinite. + + + + + Returns a new Complex instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex is zero, the Complex + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex + + + + Gets the magnitude (or absolute value) of a complex number. + + The magnitude of the current instance. + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The complex numbers to add. + The double value to add. + + + Subtraction operator. Subtracts double value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The double value to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The double value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a double value. + The result of the subtraction. + The double vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. 
+ The result of the multiplication. + The double value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. + The result of the multiplication. + The complex number to multiply. + The double value to multiply. + + + Division operator. Divides a complex number by another. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a double value by a complex number. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a double value. + The result of the division. + The dividend. + The divisor. + + + + A string representation of this complex number. + + + The string representation of this complex number. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string. + + + A format specification. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format provider. + + + An that supplies culture-specific formatting information. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string and format provider. + + + if the n, is not a number. + + + if s, is . + + + A format specification. + + + An that supplies culture-specific formatting information. + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + A norm of this value. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + The value to compare with. + + + A norm of the difference between this and the other value. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. 
+ Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex. + + The double value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex to a . + + A with the same values as this Complex. + + + + Returns the additive inverse of a specified complex number. + + The result of the and components of the parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. 
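The double-precision Complex documented here mirrors Complex32. On the .NET 4.5 target used by this solution it is normally `System.Numerics.Complex`, so the static helpers (Add, Multiply, Divide, Sqrt, Exp, Log, Sin, …) can be called directly; a short sketch:

```csharp
// Sketch using System.Numerics.Complex, which matches the members documented above.
using System;
using System.Numerics;

class ComplexDemo
{
    static void Main()
    {
        Complex p = new Complex(1d, 2d);
        Complex q = Complex.FromPolarCoordinates(1d, Math.PI / 4);

        Complex product = Complex.Multiply(p, q);   // same as p * q
        Complex quotient = Complex.Divide(p, q);    // same as p / q

        Console.WriteLine(Complex.Sqrt(product));   // principal square root
        Console.WriteLine(Complex.Exp(p));          // e raised to p
        Console.WriteLine(Complex.Log(p, 10d));     // logarithm in a given base
        Console.WriteLine(Complex.Sin(q));          // trigonometric functions accept Complex
        Console.WriteLine(quotient.Magnitude);      // absolute value
    }
}
```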
+ + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + A complex number. + The absolute value (or magnitude) of a complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a double-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A double-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. + + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). 
+ + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + The number to perfom this operation on. + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + The number to perfom this operation on. + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + The number to perfom this operation on. + + true if this instance is real nonnegative number; otherwise, false. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. 
The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + A collection of frequently used mathematical constants. 
+ + + + The number e + + + The number log[2](e) + + + The number log[10](e) + + + The number log[e](2) + + + The number log[e](10) + + + The number log[e](pi) + + + The number log[e](2*pi)/2 + + + The number 1/e + + + The number sqrt(e) + + + The number sqrt(2) + + + The number sqrt(3) + + + The number sqrt(1/2) = 1/sqrt(2) = sqrt(2)/2 + + + The number sqrt(3)/2 + + + The number pi + + + The number pi*2 + + + The number pi/2 + + + The number pi*3/2 + + + The number pi/4 + + + The number sqrt(pi) + + + The number sqrt(2pi) + + + The number sqrt(2*pi*e) + + + The number log(sqrt(2*pi)) + + + The number log(sqrt(2*pi*e)) + + + The number log(2 * sqrt(e / pi)) + + + The number 1/pi + + + The number 2/pi + + + The number 1/sqrt(pi) + + + The number 1/sqrt(2pi) + + + The number 2/sqrt(pi) + + + The number 2 * sqrt(e / pi) + + + The number (pi)/180 - factor to convert from Degree (deg) to Radians (rad). + + + + + The number (pi)/200 - factor to convert from NewGrad (grad) to Radians (rad). + + + + + The number ln(10)/20 - factor to convert from Power Decibel (dB) to Neper (Np). Use this version when the Decibel represent a power gain but the compared values are not powers (e.g. amplitude, current, voltage). + + + The number ln(10)/10 - factor to convert from Neutral Decibel (dB) to Neper (Np). Use this version when either both or neither of the Decibel and the compared values represent powers. + + + The Catalan constant + Sum(k=0 -> inf){ (-1)^k/(2*k + 1)2 } + + + The Euler-Mascheroni constant + lim(n -> inf){ Sum(k=1 -> n) { 1/k - log(n) } } + + + The number (1+sqrt(5))/2, also known as the golden ratio + + + The Glaisher constant + e^(1/12 - Zeta(-1)) + + + The Khinchin constant + prod(k=1 -> inf){1+1/(k*(k+2))^log(k,2)} + + + + The size of a double in bytes. + + + + + The size of an int in bytes. + + + + + The size of a float in bytes. + + + + + The size of a Complex in bytes. + + + + + The size of a Complex in bytes. 
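A few of the mathematical constants listed above in use. The field names (`Constants.Pi`, `Constants.Sqrt2`, `Constants.EulerMascheroni`, `Constants.GoldenRatio`, `Constants.Degree`) are assumed from the MathNet.Numerics API, since the stripped documentation does not spell them out.

```csharp
// Hedged sketch of the Constants class; field names are assumptions.
using System;
using MathNet.Numerics;

class ConstantsDemo
{
    static void Main()
    {
        Console.WriteLine(Constants.Pi);              // pi
        Console.WriteLine(Constants.Sqrt2);           // sqrt(2)
        Console.WriteLine(Constants.EulerMascheroni); // Euler-Mascheroni constant
        Console.WriteLine(Constants.GoldenRatio);     // (1 + sqrt(5)) / 2

        double degrees = 90d;
        double radians = degrees * Constants.Degree;  // Degree = pi/180, deg -> rad factor
        Console.WriteLine(radians);                   // ~1.5708
    }
}
```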
+ + + + Speed of Light in Vacuum: c_0 = 2.99792458e8 [m s^-1] (defined, exact; 2007 CODATA) + + + Magnetic Permeability in Vacuum: mu_0 = 4*Pi * 10^-7 [N A^-2 = kg m A^-2 s^-2] (defined, exact; 2007 CODATA) + + + Electric Permittivity in Vacuum: epsilon_0 = 1/(mu_0*c_0^2) [F m^-1 = A^2 s^4 kg^-1 m^-3] (defined, exact; 2007 CODATA) + + + Characteristic Impedance of Vacuum: Z_0 = mu_0*c_0 [Ohm = m^2 kg s^-3 A^-2] (defined, exact; 2007 CODATA) + + + Newtonian Constant of Gravitation: G = 6.67429e-11 [m^3 kg^-1 s^-2] (2007 CODATA) + + + Planck's constant: h = 6.62606896e-34 [J s = m^2 kg s^-1] (2007 CODATA) + + + Reduced Planck's constant: h_bar = h / (2*Pi) [J s = m^2 kg s^-1] (2007 CODATA) + + + Planck mass: m_p = (h_bar*c_0/G)^(1/2) [kg] (2007 CODATA) + + + Planck temperature: T_p = (h_bar*c_0^5/G)^(1/2)/k [K] (2007 CODATA) + + + Planck length: l_p = h_bar/(m_p*c_0) [m] (2007 CODATA) + + + Planck time: t_p = l_p/c_0 [s] (2007 CODATA) + + + Elementary Electron Charge: e = 1.602176487e-19 [C = A s] (2007 CODATA) + + + Magnetic Flux Quantum: theta_0 = h/(2*e) [Wb = m^2 kg s^-2 A^-1] (2007 CODATA) + + + Conductance Quantum: G_0 = 2*e^2/h [S = m^-2 kg^-1 s^3 A^2] (2007 CODATA) + + + Josephson Constant: K_J = 2*e/h [Hz V^-1] (2007 CODATA) + + + Von Klitzing Constant: R_K = h/e^2 [Ohm = m^2 kg s^-3 A^-2] (2007 CODATA) + + + Bohr Magneton: mu_B = e*h_bar/2*m_e [J T^-1] (2007 CODATA) + + + Nuclear Magneton: mu_N = e*h_bar/2*m_p [J T^-1] (2007 CODATA) + + + Fine Structure Constant: alpha = e^2/4*Pi*e_0*h_bar*c_0 [1] (2007 CODATA) + + + Rydberg Constant: R_infty = alpha^2*m_e*c_0/2*h [m^-1] (2007 CODATA) + + + Bor Radius: a_0 = alpha/4*Pi*R_infty [m] (2007 CODATA) + + + Hartree Energy: E_h = 2*R_infty*h*c_0 [J] (2007 CODATA) + + + Quantum of Circulation: h/2*m_e [m^2 s^-1] (2007 CODATA) + + + Fermi Coupling Constant: G_F/(h_bar*c_0)^3 [GeV^-2] (2007 CODATA) + + + Weak Mixin Angle: sin^2(theta_W) [1] (2007 CODATA) + + + Electron Mass: [kg] (2007 CODATA) + + + Electron Mass Energy Equivalent: [J] (2007 CODATA) + + + Electron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Electron Compton Wavelength: [m] (2007 CODATA) + + + Classical Electron Radius: [m] (2007 CODATA) + + + Tomson Cross Section: [m^2] (2002 CODATA) + + + Electron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Electon G-Factor: [1] (2007 CODATA) + + + Muon Mass: [kg] (2007 CODATA) + + + Muon Mass Energy Equivalent: [J] (2007 CODATA) + + + Muon Molar Mass: [kg mol^-1] (2007 CODATA) + + + Muon Compton Wavelength: [m] (2007 CODATA) + + + Muon Magnetic Moment: [J T^-1] (2007 CODATA) + + + Muon G-Factor: [1] (2007 CODATA) + + + Tau Mass: [kg] (2007 CODATA) + + + Tau Mass Energy Equivalent: [J] (2007 CODATA) + + + Tau Molar Mass: [kg mol^-1] (2007 CODATA) + + + Tau Compton Wavelength: [m] (2007 CODATA) + + + Proton Mass: [kg] (2007 CODATA) + + + Proton Mass Energy Equivalent: [J] (2007 CODATA) + + + Proton Molar Mass: [kg mol^-1] (2007 CODATA) + + + Proton Compton Wavelength: [m] (2007 CODATA) + + + Proton Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton G-Factor: [1] (2007 CODATA) + + + Proton Shielded Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Proton Shielded Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Neutron Mass: [kg] (2007 CODATA) + + + Neutron Mass Energy Equivalent: [J] (2007 CODATA) + + + Neutron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Neuron Compton Wavelength: [m] (2007 CODATA) + + + Neutron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Neutron G-Factor: [1] 
(2007 CODATA) + + + Neutron Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Deuteron Mass: [kg] (2007 CODATA) + + + Deuteron Mass Energy Equivalent: [J] (2007 CODATA) + + + Deuteron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Deuteron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Helion Mass: [kg] (2007 CODATA) + + + Helion Mass Energy Equivalent: [J] (2007 CODATA) + + + Helion Molar Mass: [kg mol^-1] (2007 CODATA) + + + Avogadro constant: [mol^-1] (2010 CODATA) + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 + + + The SI prefix factor corresponding to 1 000 + + + The SI prefix factor corresponding to 100 + + + The SI prefix factor corresponding to 10 + + + The SI prefix factor corresponding to 0.1 + + + The SI prefix factor corresponding to 0.01 + + + The SI prefix factor corresponding to 0.001 + + + The SI prefix factor corresponding to 0.000 001 + + + The SI prefix factor corresponding to 0.000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 000 001 + + + + Sets parameters for the library. + + + + + Use a specific provider if configured, e.g. using + environment variables, or fall back to the best providers. + + + + + Use the best provider available. + + + + + Gets or sets a value indicating whether the distribution classes check validate each parameter. + For the multivariate distributions this could involve an expensive matrix factorization. + The default setting of this property is true. + + + + + Gets or sets a value indicating whether to use thread safe random number generators (RNG). + Thread safe RNG about two and half time slower than non-thread safe RNG. + + + true to use thread safe random number generators ; otherwise, false. + + + + + Optional path to try to load native provider binaries from. + + + + + Gets or sets the linear algebra provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets the fourier transform provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets a value indicating how many parallel worker threads shall be used + when parallelization is applicable. + + Default to the number of processor cores, must be between 1 and 1024 (inclusive). + + + + Gets or sets the TaskScheduler used to schedule the worker tasks. + + + + + Gets or sets the the block size to use for + the native linear algebra provider. + + The block size. Default 512, must be at least 32. + + + + Gets or sets the order of the matrix when linear algebra provider + must calculate multiply in parallel threads. + + The order. Default 64, must be at least 3. + + + + Gets or sets the number of elements a vector or matrix + must contain before we multiply threads. + + Number of elements. Default 300, must be at least 3. + + + + Numerical Derivative. 
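The library-wide `Control` switches documented just above (provider selection, parameter checking, parallelism) and the numerical-derivative facade whose members follow might be driven as in this sketch. All member names here (`UseManaged`, `MaxDegreeOfParallelism`, `CheckDistributionParameters`, `Differentiate.FirstDerivative`, `Differentiate.Derivative`) are assumptions about the MathNet.Numerics API rather than names taken from the stripped documentation.

```csharp
// Hedged sketch: Control settings plus the derivative facade; names are assumptions.
using System;
using MathNet.Numerics;

class ControlAndDerivativeDemo
{
    static void Main()
    {
        Control.UseManaged();                        // managed providers only
        Control.MaxDegreeOfParallelism = 4;          // cap parallel worker threads
        Control.CheckDistributionParameters = true;  // validate distribution parameters

        Func<double, double> f = Math.Sin;

        // d/dx sin(x) at pi/3 is cos(pi/3) = 0.5.
        Console.WriteLine(Differentiate.FirstDerivative(f, Math.PI / 3));

        // Higher orders via the general entry point: second derivative of sin at pi/3 ≈ -0.866.
        Console.WriteLine(Differentiate.Derivative(f, Math.PI / 3, 2));
    }
}
```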
+ + + + + Initialized a NumericalDerivative with the given points and center. + + + + + Initialized a NumericalDerivative with the default points and center for the given order. + + + + + Evaluates the derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + Derivative order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Univariate function handle. + Derivative order. + + + + Evaluates the first derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the first derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the second derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the second derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + + + + Evaluates the partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + + + + Class to calculate finite difference coefficients using Taylor series expansion method. + + + For n points, coefficients are calculated up to the maximum derivative order possible (n-1). + The current function value position specifies the "center" for surrounding coefficients. + Selecting the first, middle or last positions represent forward, backwards and central difference methods. + + + + + + + Number of points for finite difference coefficients. Changing this value recalculates the coefficients table. + + + + + Initializes a new instance of the class. + + Number of finite difference coefficients. + + + + Gets the finite difference coefficients for a specified center and order. + + Current function position with respect to coefficients. 
Must be within point range. + Order of finite difference coefficients. + Vector of finite difference coefficients. + + + + Gets the finite difference coefficients for all orders at a specified center. + + Current function position with respect to coefficients. Must be within point range. + Rectangular array of coefficients, with columns specifing order. + + + + Type of finite different step size. + + + + + The absolute step size value will be used in numerical derivatives, regardless of order or function parameters. + + + + + A base step size value, h, will be scaled according to the function input parameter. A common example is hx = h*(1+abs(x)), however + this may vary depending on implementation. This definition only guarantees that the only scaling will be relative to the + function input parameter and not the order of the finite difference derivative. + + + + + A base step size value, eps (typically machine precision), is scaled according to the finite difference coefficient order + and function input parameter. The initial scaling according to finite different coefficient order can be thought of as producing a + base step size, h, that is equivalent to scaling. This stepsize is then scaled according to the function + input parameter. Although implementation may vary, an example of second order accurate scaling may be (eps)^(1/3)*(1+abs(x)). + + + + + Class to evaluate the numerical derivative of a function using finite difference approximations. + Variable point and center methods can be initialized . + This class can also be used to return function handles (delegates) for a fixed derivative order and variable. + It is possible to evaluate the derivative and partial derivative of univariate and multivariate functions respectively. + + + + + Initializes a NumericalDerivative class with the default 3 point center difference method. + + + + + Initialized a NumericalDerivative class. + + Number of points for finite difference derivatives. + Location of the center with respect to other points. Value ranges from zero to points-1. + + + + Sets and gets the finite difference step size. This value is for each function evaluation if relative stepsize types are used. + If the base step size used in scaling is desired, see . + + + Setting then getting the StepSize may return a different value. This is not unusual since a user-defined step size is converted to a + base-2 representable number to improve finite difference accuracy. + + + + + Sets and gets the base fininte difference step size. This assigned value to this parameter is only used if is set to RelativeX. + However, if the StepType is Relative, it will contain the base step size computed from based on the finite difference order. + + + + + Sets and gets the base finite difference step size. This parameter is only used if is set to Relative. + By default this is set to machine epsilon, from which is computed. + + + + + Sets and gets the location of the center point for the finite difference derivative. + + + + + Number of times a function is evaluated for numerical derivatives. + + + + + Type of step size for computing finite differences. If set to absolute, dx = h. + If set to relative, dx = (1+abs(x))*h^(2/(order+1)). This provides accurate results when + h is approximately equal to the square-root of machine accuracy, epsilon. + + + + + Evaluates the derivative of equidistant points using the finite difference method. + + Vector of points StepSize apart. + Derivative order. + Finite difference step size. 
+ Derivative of points of the specified order. + + + + Evaluates the derivative of a scalar univariate function. + + + Supplying the optional argument currentValue will reduce the number of function evaluations + required to calculate the finite difference derivative. + + Function handle. + Point at which to compute the derivative. + Derivative order. + Current function value at center. + Function derivative at x of the specified order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Input function handle. + Derivative order. + Function handle that evaluates the derivative of input function at a fixed order. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Function partial derivative at x of the specified order. + + + + Evaluates the partial derivatives of a multivariate function array. + + + This function assumes the input vector x is of the correct length for f. + + Multivariate vector function array handle. + Vector at which to evaluate the derivatives. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Vector of functions partial derivatives at x of the specified order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at a fixed order. + + + + Creates a function handle for the partial derivative of a vector multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at fixed order. + + + + Evaluates the mixed partial derivative of variable order for multivariate functions. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function handle. + Points at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivative at x of the specified order. + + + + Evaluates the mixed partial derivative of variable order for multivariate function arrays. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function array handle. + Vector at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivatives at x of the specified order. + + + + Creates a function handle for the mixed partial derivative of a multivariate function. + + Input function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. 
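A hedged sketch of the `NumericalDerivative` class described above (variable points and center, derivative and partial-derivative evaluation, evaluation counting). The method and property names are assumed from the `MathNet.Numerics.Differentiation` namespace and should be checked against the packaged version.

```csharp
// Hedged sketch of NumericalDerivative; method and property names are assumptions.
using System;
using MathNet.Numerics.Differentiation;

class NumericalDerivativeDemo
{
    static void Main()
    {
        // 5-point stencil with the center at index 2 -> central differences.
        var nd = new NumericalDerivative(5, 2);

        Func<double, double> f = x => Math.Exp(x);
        double d2 = nd.EvaluateDerivative(f, 0.0, 2);   // second derivative of e^x at 0 -> ~1
        Console.WriteLine(d2);

        // Partial derivative of g(x, y) = x^2 * y with respect to y (variable index 1).
        Func<double[], double> g = v => v[0] * v[0] * v[1];
        double dgdy = nd.EvaluatePartialDerivative(g, new[] { 3.0, 2.0 }, 1, 1);
        Console.WriteLine(dgdy);                        // expect ~9 (x^2 at x = 3)

        Console.WriteLine(nd.FunctionEvaluations);      // bookkeeping of function calls
        nd.ResetEvaluations();
    }
}
```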
+ + + + Creates a function handle for the mixed partial derivative of a multivariate vector function. + + Input vector function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Resets the evaluation counter. + + + + + Class for evaluating the Hessian of a smooth continuously differentiable function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Hessian object with a three point central difference method. + + + + + Creates a numerical Hessian with a specified differentiation scheme. + + Number of points for Hessian evaluation. + Center point for differentiation. + + + + Evaluates the Hessian of the scalar univariate function f at points x. + + Scalar univariate function handle. + Point at which to evaluate Hessian. + Hessian tensor. + + + + Evaluates the Hessian of a multivariate function f at points x. + + + This method of computing the Hessian is only vaid for Lipschitz continuous functions. + The function mirrors the Hessian along the diagonal since d2f/dxdy = d2f/dydx for continuously differentiable functions. + + Multivariate function handle.> + Points at which to evaluate Hessian.> + Hessian tensor. + + + + Resets the function evaluation counter for the Hessian. + + + + + Class for evaluating the Jacobian of a function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Jacobian object with a three point central difference method. + + + + + Creates a numerical Jacobian with a specified differentiation scheme. + + Number of points for Jacobian evaluation. + Center point for differentiation. + + + + Evaluates the Jacobian of scalar univariate function f at point x. + + Scalar univariate function handle. + Point at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x. + + + This function assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x given a current function value. + + + To minimize the number of function evaluations, a user can supply the current value of the function + to be used in computing the Jacobian. This value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Current function value at finite difference center. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function array f at vector x. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Jacobian matrix. + + + + Evaluates the Jacobian of a multivariate function array f at vector x given a vector of current function values. + + + To minimize the number of function evaluations, a user can supply a vector of current values of the functions + to be used in computing the Jacobian. These value must correspond to the "center" location for the + finite differencing. 
If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Vector of current function values. + Jacobian matrix. + + + + Resets the function evaluation counter for the Jacobian. + + + + + Metrics to measure the distance between two structures. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Pearson's distance, i.e. 1 - the person correlation coefficient. + + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Discrete Univariate Bernoulli distribution. + The Bernoulli distribution is a distribution over bits. 
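Stepping back to the `Distance` helpers documented just before the distribution classes, a small usage sketch follows; the static method names are assumed from the MathNet.Numerics API.

```csharp
// Hedged sketch of the Distance helpers; static method names are assumptions.
using System;
using MathNet.Numerics;

class DistanceDemo
{
    static void Main()
    {
        double[] a = { 1.0, 2.0, 3.0 };
        double[] b = { 2.0, 4.0, 6.0 };

        Console.WriteLine(Distance.Euclidean(a, b));  // L2 norm of the difference, sqrt(14)
        Console.WriteLine(Distance.Manhattan(a, b));  // L1 norm of the difference, 6
        Console.WriteLine(Distance.Chebyshev(a, b));  // infinity norm of the difference, 3
        Console.WriteLine(Distance.Cosine(a, b));     // angular distance, scale ignored -> 0 here
        Console.WriteLine(Distance.Hamming(a, b));    // number of differing positions -> 3
    }
}
```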
The parameter + p specifies the probability that a 1 is generated. + Wikipedia - Bernoulli distribution. + + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + If the Bernoulli parameter is not in the range [0,1]. + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + If the Bernoulli parameter is not in the range [0,1]. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Generates one sample from the Bernoulli distribution. + + The random source to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A random sample from the Bernoulli distribution. + + + + Samples a Bernoulli distributed random variable. + + A sample from the Bernoulli distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The random number generator to use. 
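As a small usage sketch of the Bernoulli members described above (the probability 0.3 is arbitrary):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class BernoulliSketch
{
    static void Main()
    {
        var coin = new Bernoulli(0.3);                                     // P(X = 1) = 0.3
        Console.WriteLine("Mean:   " + coin.Mean);                         // 0.3
        Console.WriteLine("P(X=1): " + coin.Probability(1));               // PMF at k = 1
        Console.WriteLine("CDF(0): " + coin.CumulativeDistribution(0.0));  // 0.7

        int one = coin.Sample();          // a single 0/1 draw
        var many = new int[10];
        coin.Samples(many);               // fill an array with draws

        // Static forms that skip constructing an instance
        double pmf = Bernoulli.PMF(0.3, 1);
        int draw = Bernoulli.Sample(0.3);
    }
}
```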
+ The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Continuous Univariate Beta distribution. + For details about this distribution, see + Wikipedia - Beta distribution. + + + There are a few special cases for the parameterization of the Beta distribution. When both + shape parameters are positive infinity, the Beta distribution degenerates to a point distribution + at 0.5. When one of the shape parameters is positive infinity, the distribution degenerates to a point + distribution at the positive infinity. When both shape parameters are 0.0, the Beta distribution + degenerates to a Bernoulli distribution with parameter 0.5. When one shape parameter is 0.0, the + distribution degenerates to a point distribution at the non-zero shape parameter. + + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + A string representation of the Beta distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Gets the α shape parameter of the Beta distribution. Range: α ≥ 0. + + + + + Gets the β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Beta distribution. + + + + + Gets the variance of the Beta distribution. + + + + + Gets the standard deviation of the Beta distribution. + + + + + Gets the entropy of the Beta distribution. + + + + + Gets the skewness of the Beta distribution. + + + + + Gets the mode of the Beta distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the Beta distribution. + + + + + Gets the minimum of the Beta distribution. + + + + + Gets the maximum of the Beta distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . 
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Beta distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Beta distribution. + + a sequence of samples from the distribution. + + + + Samples Beta distributed random variables by sampling two Gamma variables and normalizing. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a random number from the Beta distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. 
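A short sketch of the Beta members above (the shape parameters 2 and 5 are illustrative; as the docs note, InvCDF is currently approximate and comparatively slow):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class BetaSketch
{
    static void Main()
    {
        var beta = new Beta(2.0, 5.0);                              // α = 2, β = 5
        Console.WriteLine("Mean:     " + beta.Mean);                // α/(α+β) ≈ 0.2857
        Console.WriteLine("PDF(0.2): " + beta.Density(0.2));
        Console.WriteLine("CDF(0.2): " + beta.CumulativeDistribution(0.2));

        double x = beta.Sample();                                   // one draw in [0, 1]

        // Static equivalents
        double pdf = Beta.PDF(2.0, 5.0, 0.2);
        double median = Beta.InvCDF(2.0, 5.0, 0.5);                 // quantile function (approximate)
    }
}
```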
+ The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + + + + Create a Beta PERT distribution, used in risk analysis and other domains where an expert forecast + is used to construct an underlying beta distribution. + + The minimum value. + The maximum value. + The most likely value (mode). + The random number generator which is used to draw random samples. + The Beta distribution derived from the PERT parameters. + + + + A string representation of the distribution. + + A string representation of the BetaScaled distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the α shape parameter of the BetaScaled distribution. Range: α > 0. + + + + + Gets the β shape parameter of the BetaScaled distribution. Range: β > 0. + + + + + Gets the location (μ) of the BetaScaled distribution. + + + + + Gets the scale (σ) of the BetaScaled distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the BetaScaled distribution. + + + + + Gets the variance of the BetaScaled distribution. + + + + + Gets the standard deviation of the BetaScaled distribution. + + + + + Gets the entropy of the BetaScaled distribution. + + + + + Gets the skewness of the BetaScaled distribution. + + + + + Gets the mode of the BetaScaled distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the BetaScaled distribution. + + + + + Gets the minimum of the BetaScaled distribution. + + + + + Gets the maximum of the BetaScaled distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. 
ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. 
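The PERT factory documented above is handy for turning a three-point expert estimate into a distribution; a hedged sketch (the min/most-likely/max values are invented, and the argument order min, max, likely follows the parameter list above):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class PertSketch
{
    static void Main()
    {
        // Expert estimate for, say, a task duration: min 2, most likely 5, max 12 days
        var pert = BetaScaled.PERT(2.0, 12.0, 5.0);

        Console.WriteLine("Mean:      " + pert.Mean);
        Console.WriteLine("P(X <= 7): " + pert.CumulativeDistribution(7.0));

        double simulated = pert.Sample();   // one simulated duration
    }
}
```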
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Binomial distribution. + For details about this distribution, see + Wikipedia - Binomial distribution. + + + The distribution is parameterized by a probability (between 0.0 and 1.0). + + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + If is not in the interval [0.0,1.0]. + If is negative. + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The random number generator which is used to draw random samples. + If is not in the interval [0.0,1.0]. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + + + + Gets the success probability in each trial. Range: 0 ≤ p ≤ 1. + + + + + Gets the number of trials. Range: n ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . 
+ + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the Binomial distribution without doing parameter checking. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successful trials. + + + + Samples a Binomially distributed random variable. + + The number of successes in N trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Binomially distributed random variables. + + a sequence of successes in N trials. + + + + Samples a binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Samples a binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Discrete Univariate Categorical distribution. + For details about this distribution, see + Wikipedia - Categorical distribution. This + distribution is sometimes called the Discrete distribution. 
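A compact sketch of the Binomial members above (p = 0.25 and n = 20 are arbitrary):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class BinomialSketch
{
    static void Main()
    {
        var binomial = new Binomial(0.25, 20);                     // success probability p, n trials
        Console.WriteLine("Mean:    " + binomial.Mean);            // n*p = 5
        Console.WriteLine("P(X=5):  " + binomial.Probability(5));
        Console.WriteLine("P(X<=5): " + binomial.CumulativeDistribution(5.0));

        int successes = binomial.Sample();                         // successes in 20 trials

        // Static forms
        double pmf = Binomial.PMF(0.25, 20, 5);
        double cdf = Binomial.CDF(0.25, 20, 5.0);
    }
}
```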
+ + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + Support: 0..k where k = length(probability mass array)-1 + + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class from a . The distribution + will not be automatically updated when the histogram changes. The categorical distribution will have + one value for each bucket and a probability for that value proportional to the bucket count. + + The histogram from which to create the categorical variable. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Gets the probability mass vector (non-negative ratios) of the multinomial. + + Sometimes the normalized probability vector cannot be represented exactly in a floating point representation. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a . + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets he mode of the distribution. + + Throws a . + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
+ + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. + + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. 
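The Categorical entries above accept unnormalized ratios; a small sketch (the ratios and the seeded generator are illustrative):

```csharp
using System;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.Random;

static class CategoricalSketch
{
    static void Main()
    {
        double[] ratios = { 1.0, 2.0, 7.0 };            // unnormalized mass over the support {0, 1, 2}
        var categorical = new Categorical(ratios);

        Console.WriteLine("P(X=2): " + categorical.Probability(2));              // 0.7
        Console.WriteLine("CDF(1): " + categorical.CumulativeDistribution(1.0)); // 0.3

        int k = categorical.Sample();                   // an integer in {0, 1, 2}

        // Static form with an explicit random source
        var rng = new MersenneTwister(42);
        int k2 = Categorical.Sample(rng, ratios);
    }
}
```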
+ random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. + + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. 
Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
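A sketch of the Cauchy members above (location 0 and scale 1 give the standard Cauchy; since its mean and variance are undefined, the example reads the median instead):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class CauchySketch
{
    static void Main()
    {
        var cauchy = new Cauchy(0.0, 1.0);                                   // x0 = 0, γ = 1
        Console.WriteLine("PDF(0): " + cauchy.Density(0.0));                 // 1/π ≈ 0.318
        Console.WriteLine("CDF(1): " + cauchy.CumulativeDistribution(1.0));  // 0.75
        Console.WriteLine("Median: " + cauchy.Median);                       // 0

        double x = cauchy.Sample();

        // Static quantile function
        double q90 = Cauchy.InvCDF(0.0, 1.0, 0.9);
    }
}
```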
+ + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. 
+ The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
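A minimal sketch of the Chi members above (k = 3 is arbitrary):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class ChiSketch
{
    static void Main()
    {
        var chi = new Chi(3.0);                                         // k = 3 degrees of freedom
        Console.WriteLine("Mean:   " + chi.Mean);
        Console.WriteLine("PDF(1): " + chi.Density(1.0));
        Console.WriteLine("CDF(1): " + chi.CumulativeDistribution(1.0));

        double draw = chi.Sample();

        // Static form
        double pdf = Chi.PDF(3.0, 1.0);
    }
}
```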
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. 
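The Chi-Squared members above in a short sketch (k = 4 and the 95% quantile are illustrative):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class ChiSquaredSketch
{
    static void Main()
    {
        var chiSq = new ChiSquared(4.0);                                  // k = 4 degrees of freedom
        Console.WriteLine("Mean:   " + chiSq.Mean);                       // 4
        Console.WriteLine("PDF(3): " + chiSq.Density(3.0));
        Console.WriteLine("CDF(3): " + chiSq.CumulativeDistribution(3.0));

        // 95% quantile, as used in a χ² test with 4 degrees of freedom (≈ 9.49)
        double critical = ChiSquared.InvCDF(4.0, 0.95);

        double draw = chiSq.Sample();
    }
}
```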
+ + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . 
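A sketch of the ContinuousUniform members above (the bounds −1 and 1 are arbitrary):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class UniformSketch
{
    static void Main()
    {
        var uniform = new ContinuousUniform(-1.0, 1.0);
        Console.WriteLine("Mean:     " + uniform.Mean);                        // 0
        Console.WriteLine("PDF(0.5): " + uniform.Density(0.5));                // 1/(upper-lower) = 0.5
        Console.WriteLine("CDF(0):   " + uniform.CumulativeDistribution(0.0)); // 0.5

        double x = uniform.Sample();

        // Static one-off sample on a different interval
        double y = ContinuousUniform.Sample(0.0, 10.0);
    }
}
```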
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. 
+ + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. + + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. 
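A hedged sketch of the Conway-Maxwell-Poisson members above (λ = 2 and ν = 1.5 are illustrative; as the docs note, several properties are computed approximately via the cached normalization constant):

```csharp
using System;
using MathNet.Numerics.Distributions;

static class CmpSketch
{
    static void Main()
    {
        var cmp = new ConwayMaxwellPoisson(2.0, 1.5);        // λ = 2, ν = 1.5
        Console.WriteLine("Mean:    " + cmp.Mean);           // computed approximately
        Console.WriteLine("P(X=1):  " + cmp.Probability(1));
        Console.WriteLine("P(X<=3): " + cmp.CumulativeDistribution(3.0));

        int draw = cmp.Sample();
    }
}
```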
Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. + The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. 
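Illustrative only: a short C# sketch of the ConwayMaxwellPoisson and Dirichlet types documented above. The parameter values are arbitrary, and only members named in these entries (constructors, Probability, Sample) are used.

using System;
using MathNet.Numerics.Distributions;

class CmpAndDirichletSketch
{
    static void Main()
    {
        // lambda > 0, nu >= 0; with nu = 1 the distribution reduces to Poisson, as noted above.
        var cmp = new ConwayMaxwellPoisson(2.5, 1.0);
        Console.WriteLine(cmp.Probability(3));   // PMF at k = 3
        Console.WriteLine(cmp.Sample());         // one draw

        // Dirichlet parameters are per-component weights; a sample is a probability vector.
        var dirichlet = new Dirichlet(new[] { 1.0, 2.0, 3.0 });
        double[] v = dirichlet.Sample();
        Console.WriteLine(v[0] + v[1] + v[2]);   // sums to ~1.0
    }
}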
+ + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. 
Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
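Illustrative only: a minimal C# sketch of the DiscreteUniform entries above, using the inclusive bounds 1..6 (a fair die) as an arbitrary example.

using System;
using MathNet.Numerics.Distributions;

class DiscreteUniformSketch
{
    static void Main()
    {
        // Both bounds are inclusive, so this models a fair six-sided die.
        var die = new DiscreteUniform(1, 6);

        Console.WriteLine(die.Probability(3));             // 1/6
        Console.WriteLine(die.CumulativeDistribution(3));  // P(X <= 3) = 0.5
        Console.WriteLine(die.Sample());                   // an integer in [1, 6]

        // Static form with an explicit random source, as documented above.
        int roll = DiscreteUniform.Sample(new Random(1), 1, 6);
        Console.WriteLine(roll);
    }
}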
+ + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. 
+ The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . 
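Illustrative only: a small C# sketch of the Exponential members documented above. The rate 0.5 is arbitrary, and the static CDF/InvCDF calls assume the distribution-parameters-first, location-last argument order used by these static shorthands.

using System;
using MathNet.Numerics.Distributions;

class ExponentialSketch
{
    static void Main()
    {
        var exponential = new Exponential(0.5);   // rate lambda = 0.5, so Mean = 1/lambda = 2
        Console.WriteLine(exponential.Mean);

        double p = exponential.CumulativeDistribution(1.0);        // P(X <= 1)
        double x = exponential.InverseCumulativeDistribution(p);   // round-trips to 1.0
        Console.WriteLine("{0} {1}", p, x);

        // Static shorthand documented above (rate first, then the location/probability).
        Console.WriteLine(Exponential.CDF(0.5, 1.0));
        Console.WriteLine(Exponential.InvCDF(0.5, p));
    }
}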
+ + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
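Illustrative only: the FisherSnedecor (F) distribution entries above in a minimal C# sketch; d1 = 5 and d2 = 10 are arbitrary degrees of freedom.

using System;
using MathNet.Numerics.Distributions;

class FisherSnedecorSketch
{
    static void Main()
    {
        var f = new FisherSnedecor(5.0, 10.0);

        Console.WriteLine(f.Density(1.0));                 // PDF at x = 1
        Console.WriteLine(f.CumulativeDistribution(2.0));  // P(X <= 2)
        Console.WriteLine(f.Sample());                     // one draw
    }
}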
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. 
+ The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. 
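Illustrative only: a C# sketch of the two Gamma parameterizations described above. The constructor takes shape and rate; WithShapeScale is assumed to be the factory behind the "shape and scale parameter" entry, and the numeric values are arbitrary.

using System;
using MathNet.Numerics.Distributions;

class GammaSketch
{
    static void Main()
    {
        // Shape k = 2 with rate beta = 0.5 ...
        var byRate = new Gamma(2.0, 0.5);

        // ... is the same distribution as shape k = 2 with scale theta = 2 (scale = 1/rate).
        var byScale = Gamma.WithShapeScale(2.0, 2.0);

        Console.WriteLine("{0} {1}", byRate.Mean, byScale.Mean);  // both 4.0 (= shape * scale)
        Console.WriteLine(byRate.Density(1.0));                   // PDF at x = 1
        Console.WriteLine(byRate.Sample());                       // Marsaglia & Tsang sampling, per the note above
    }
}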
+ + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. 
+ + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
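Illustrative only: a minimal C# sketch of the Geometric entries above. The success probability 0.25 is arbitrary; the support starts at 1, matching the remark that this implementation never generates 0.

using System;
using MathNet.Numerics.Distributions;

class GeometricSketch
{
    static void Main()
    {
        var geo = new Geometric(0.25);

        Console.WriteLine(geo.Mean);            // 1 / p = 4
        Console.WriteLine(geo.Probability(1));  // 0.25
        Console.WriteLine(geo.Probability(2));  // 0.75 * 0.25
        Console.WriteLine(geo.Sample());        // an integer >= 1
    }
}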
+ + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). 
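Illustrative only: a C# sketch of the Hypergeometric entries above, assuming the constructor takes population size (N), successes within the population (K), and draws without replacement (n) in that order; the counts 50/5/10 are arbitrary.

using System;
using MathNet.Numerics.Distributions;

class HypergeometricSketch
{
    static void Main()
    {
        // 50 items in the population, 5 of them successes, 10 drawn without replacement.
        var hyper = new Hypergeometric(50, 5, 10);

        Console.WriteLine(hyper.Probability(1));             // P(exactly 1 success in the draw)
        Console.WriteLine(hyper.CumulativeDistribution(1));  // P(at most 1 success)
        Console.WriteLine(hyper.Sample());                   // successes in one simulated draw
    }
}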
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). 
+ The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. 
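Illustrative only: the IContinuousDistribution interface documented above lets code sample from any continuous distribution through one abstraction. The MonteCarloMean helper below is a hypothetical sketch of that, reusing two distributions from this file; parameter values are arbitrary.

using System;
using MathNet.Numerics.Distributions;

class InterfaceSketch
{
    // Works for any continuous distribution, since Sample() is part of the interface above.
    static double MonteCarloMean(IContinuousDistribution distribution, int n)
    {
        double sum = 0.0;
        for (int i = 0; i < n; i++)
        {
            sum += distribution.Sample();
        }
        return sum / n;
    }

    static void Main()
    {
        Console.WriteLine(MonteCarloMean(new InverseGamma(3.0, 2.0), 100000));  // ~ 2 / (3 - 1) = 1
        Console.WriteLine(MonteCarloMean(new Exponential(0.5), 100000));        // ~ 2
    }
}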
+ + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. 
+ Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. 
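Illustrative only: a C# sketch of the Laplace entries above with location 0 and scale 1; the static PDF call assumes the parameters-first argument order used by the other static shorthands in this file.

using System;
using MathNet.Numerics.Distributions;

class LaplaceSketch
{
    static void Main()
    {
        // Location mu = 0, scale b = 1.
        var laplace = new Laplace(0.0, 1.0);

        Console.WriteLine(laplace.Density(0.0));                 // 1 / (2b) = 0.5 at the location
        Console.WriteLine(laplace.CumulativeDistribution(0.0));  // 0.5 by symmetry
        Console.WriteLine(laplace.Sample());

        // Static form: location, scale, then the evaluation point.
        Console.WriteLine(Laplace.PDF(0.0, 1.0, 1.0));
    }
}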
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. 
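Illustrative only: a C# sketch of the two log-normal constructions described above. WithMeanVariance is assumed to be the factory behind the "desired mean and variance" entry, and the parameter values are arbitrary.

using System;
using MathNet.Numerics.Distributions;

class LogNormalSketch
{
    static void Main()
    {
        // Parameterized directly by the log-scale mu and shape sigma ...
        var direct = new LogNormal(0.0, 0.5);

        // ... or constructed from a desired mean and variance, as documented above.
        var fitted = LogNormal.WithMeanVariance(10.0, 4.0);

        Console.WriteLine(direct.Median);  // exp(mu) = 1
        Console.WriteLine(fitted.Mean);    // ~10
        Console.WriteLine(fitted.CumulativeDistribution(10.0));
        Console.WriteLine(direct.Sample());
    }
}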
+ + + + + Gets the maximum of the log-normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the density at . + + MATLAB: lognpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: logncdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: logninv + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. 
Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Multivariate Matrix-valued Normal distributions. The distribution + is parameterized by a mean matrix (M), a covariance matrix for the rows (V) and a covariance matrix + for the columns (K). If the dimension of M is d-by-m then V is d-by-d and K is m-by-m. + Wikipedia - MatrixNormal distribution. + + + + + The mean of the matrix normal distribution. + + + + + The covariance matrix for the rows. + + + + + The covariance matrix for the columns. + + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + The random number generator which is used to draw random samples. + If the dimensions of the mean and two covariance matrices don't match. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + + + + Gets the mean. (M) + + The mean of the distribution. + + + + Gets the row covariance. (V) + + The row covariance. + + + + Gets the column covariance. (K) + + The column covariance. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Evaluates the probability density function for the matrix normal distribution. + + The matrix at which to evaluate the density at. + the density at + If the argument does not have the correct dimensions. + + + + Samples a matrix normal distributed random variable. + + A random number from this distribution. + + + + Samples a matrix normal distributed random variable. + + The random number generator to use. + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + a sequence of samples from the distribution. + + + + Samples a vector normal distributed random variable. + + The random number generator to use. + The mean of the vector normal distribution. + The covariance matrix of the vector normal distribution. + a sequence of samples from defined distribution. + + + + Multivariate Multinomial distribution. For details about this distribution, see + Wikipedia - Multinomial distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + + + Stores the normalized multinomial probabilities. + + + + + The number of trials. + + + + + Initializes a new instance of the Multinomial class. 
+ + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class from histogram . The distribution will + not be automatically updated when the histogram changes. + + Histogram instance + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative returns false, + if the sum of parameters is 0.0, or if the number of trials is negative; otherwise true. + + + + Gets the proportion of ratios. + + + + + Gets the number of trials. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Computes values of the probability mass function. + + Non-negative integers x1, ..., xk + The probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Computes values of the log probability mass function. + + Non-negative integers x1, ..., xk + The log probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Samples one multinomial distributed random variable. + + the counts for each of the different possible values. + + + + Samples a sequence multinomially distributed random variables. + + a sequence of counts for each of the different possible values. + + + + Samples one multinomial distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + the counts for each of the different possible values. + + + + Samples a multinomially distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of variables needed. + a sequence of counts for each of the different possible values. + + + + Discrete Univariate Negative Binomial distribution. + The negative binomial is a distribution over the natural numbers with two parameters r, p. For the special + case that r is an integer one can interpret the distribution as the number of failures before the r'th success + when the probability of success is p. + Wikipedia - NegativeBinomial distribution. + + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. 
Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Gets the number of successes. Range: r ≥ 0. + + + + + Gets the probability of success. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Samples a negative binomial distributed random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + a sample from the distribution. + + + + Samples a NegativeBinomial distributed random variable. + + a sample from the distribution. 
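A brief sketch of the negative binomial usage implied above, assuming the MathNet.Numerics 3.x instance names Probability, ProbabilityLn, CumulativeDistribution and Sample, and a static PMF overload with the assumed argument order (r, p, k):

    using System;
    using MathNet.Numerics.Distributions;

    class NegativeBinomialSketch
    {
        static void Main()
        {
            // r = 5 successes required, success probability p = 0.4.
            var nb = new NegativeBinomial(5.0, 0.4);

            Console.WriteLine(nb.Probability(3));            // P(X = 3): three failures before the 5th success
            Console.WriteLine(nb.ProbabilityLn(3));          // ln P(X = 3)
            Console.WriteLine(nb.CumulativeDistribution(3)); // P(X <= 3)

            // Static form with (r, p) passed explicitly (assumed order), plus a single draw.
            Console.WriteLine(NegativeBinomial.PMF(5.0, 0.4, 3));
            Console.WriteLine(nb.Sample());
        }
    }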
+ + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of NegativeBinomial distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Continuous Univariate Normal distribution, also known as Gaussian distribution. + For details about this distribution, see + Wikipedia - Normal distribution. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a normal distribution from a mean and standard deviation. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + a normal distribution. + + + + Constructs a normal distribution from a mean and variance. + + The mean (μ) of the normal distribution. + The variance (σ^2) of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. 
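The normal distribution can be built from a standard deviation, from a variance (above), or from a precision (documented just below). A short sketch of the three routes, assuming the MathNet.Numerics 3.x factory names Normal.WithMeanVariance and Normal.WithMeanPrecision:

    using System;
    using MathNet.Numerics.Distributions;

    class NormalFactorySketch
    {
        static void Main()
        {
            // The same N(mean = 10, σ = 2) distribution constructed three ways:
            var byStdDev = new Normal(10.0, 2.0);                   // standard deviation σ = 2
            var byVariance = Normal.WithMeanVariance(10.0, 4.0);    // variance σ² = 4
            var byPrecision = Normal.WithMeanPrecision(10.0, 0.25); // precision 1/σ² = 0.25

            // All three should report the same standard deviation (2).
            Console.WriteLine($"{byStdDev.StdDev} {byVariance.StdDev} {byPrecision.StdDev}");
        }
    }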
+ + + + Constructs a normal distribution from a mean and precision. + + The mean (μ) of the normal distribution. + The precision of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Estimates the normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + MATLAB: normfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Gets the mean (μ) of the normal distribution. + + + + + Gets the standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + + Gets the variance of the normal distribution. + + + + + Gets the precision of the normal distribution. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the entropy of the normal distribution. + + + + + Gets the skewness of the normal distribution. + + + + + Gets the mode of the normal distribution. + + + + + Gets the median of the normal distribution. + + + + + Gets the minimum of the normal distribution. + + + + + Gets the maximum of the normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the density at . + + MATLAB: normpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The mean (μ) of the normal distribution. 
+ The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: normcdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: norminv + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + This structure represents the type over which the distribution + is defined. + + + + + The mean value. + + + + + The precision value. + + + + + Initializes a new instance of the struct. + + The mean of the pair. + The precision of the pair. + + + + Gets or sets the mean of the pair. + + + + + Gets or sets the precision of the pair. + + + + + Multivariate Normal-Gamma Distribution. + The distribution is the conjugate prior distribution for the + distribution. It specifies a prior over the mean and precision of the distribution. + It is parameterized by four numbers: the mean location, the mean scale, the precision shape and the + precision inverse scale. + The distribution NG(mu, tau | mloc,mscale,psscale,pinvscale) = Normal(mu | mloc, 1/(mscale*tau)) * Gamma(tau | psscale,pinvscale). + The following degenerate cases are special: when the precision is known, + the precision shape will encode the value of the precision while the precision inverse scale is positive + infinity. When the mean is known, the mean location will encode the value of the mean while the scale + will be positive infinity. A completely degenerate NormalGamma distribution with known mean and precision is possible as well. + Wikipedia - Normal-Gamma distribution. + + + + + Initializes a new instance of the class. 
+ + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Gets the location of the mean. + + + + + Gets the scale of the mean. + + + + + Gets the shape of the precision. + + + + + Gets the inverse scale of the precision. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Returns the marginal distribution for the mean of the NormalGamma distribution. + + the marginal distribution for the mean of the NormalGamma distribution. + + + + Returns the marginal distribution for the precision of the distribution. + + The marginal distribution for the precision of the distribution/ + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the variance of the distribution. + + The mean of the distribution. + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + Density value + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + Density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + The log of the density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + The log of the density value + + + + Generates a sample from the NormalGamma distribution. + + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + a sequence of samples from the distribution. + + + + Generates a sample from the NormalGamma distribution. + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sequence of samples from the distribution. + + + + Continuous Univariate Pareto distribution. + The Pareto distribution is a power law probability distribution that coincides with social, + scientific, geophysical, actuarial, and many other types of observable phenomena. + For details about this distribution, see + Wikipedia - Pareto distribution. + + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + If or are negative. + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The random number generator which is used to draw random samples. 
+ If or are negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + + + + Gets the scale (xm) of the distribution. Range: xm > 0. + + + + + Gets the shape (α) of the distribution. Range: α > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Pareto distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. 
+ The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Poisson distribution. + + + Distribution is described at Wikipedia - Poisson distribution. + Knuth's method is used to generate Poisson distributed random variables. + f(x) = exp(-λ)*λ^x/x!; + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + If is equal or less then 0.0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + If is equal or less then 0.0. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + + + + Gets the Poisson distribution parameter λ. Range: λ > 0. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. 
+ the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Generates one sample from the Poisson distribution. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by Knuth's method. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by "Rejection method PA". + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson, + Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) + The article is on pages 29-35. The algorithm given here is on page 32. + + + + Samples a Poisson distributed random variable. + + A sample from the Poisson distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Poisson distributed random variables. + + a sequence of successes in N trials. + + + + Samples a Poisson distributed random variable. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Samples a Poisson distributed random variable. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Rayleigh distribution. + The Rayleigh distribution (pronounced /ˈreɪli/) is a continuous probability distribution. 
As an + example of how it arises, the wind speed will have a Rayleigh distribution if the components of + the two-dimensional wind velocity vector are uncorrelated and normally distributed with equal variance. + For details about this distribution, see + Wikipedia - Rayleigh distribution. + + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + If is negative. + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the scale (σ) of the distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Rayleigh distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (σ) of the distribution. Range: σ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. 
+ The scale (σ) of the distribution. Range: σ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Stable distribution. + A random variable is said to be stable (or to have a stable distribution) if it has + the property that a linear combination of two independent copies of the variable has + the same distribution, up to location and scale parameters. + For details about this distribution, see + Wikipedia - Stable distribution. + + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Gets the stability (α) of the distribution. Range: 2 ≥ α > 0. + + + + + Gets The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + + + + + Gets the scale (c) of the distribution. Range: c > 0. + + + + + Gets the location (μ) of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets he entropy of the distribution. + + Always throws a not supported exception. + + + + Gets the skewness of the distribution. + + Throws a not supported exception of Alpha != 2. + + + + Gets the mode of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the median of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. 
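A sketch of the stable-distribution special cases called out above and in the CDF note just below, assuming the MathNet.Numerics 3.x constructor order Stable(alpha, beta, scale, location):

    using System;
    using MathNet.Numerics.Distributions;

    class StableSketch
    {
        static void Main()
        {
            // Parameters are (stability α, skewness β, scale c, location μ).
            var gaussianCase = new Stable(2.0, 0.0, 1.0, 0.0); // α = 2: reduces to a normal distribution
            var cauchyCase = new Stable(1.0, 0.0, 1.0, 0.0);   // α = 1, β = 0: Cauchy distribution
            var levyCase = new Stable(0.5, 1.0, 1.0, 0.0);     // α = 0.5, β = 1: Lévy distribution

            // A closed-form CDF is only available for these special cases; other
            // (α, β) combinations throw NotSupportedException per the notes below.
            Console.WriteLine(gaussianCase.CumulativeDistribution(1.0));
            Console.WriteLine(cauchyCase.CumulativeDistribution(1.0));

            // Sampling works for any valid (α, β, c, μ).
            Console.WriteLine(levyCase.Sample());
        }
    }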
+ + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + Throws a not supported exception if Alpha != 2, (Alpha != 1 and Beta !=0), or (Alpha != 0.5 and Beta != 1) + + + + Samples the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a random number from the distribution. + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Stable distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. 
+ + + + Generates a sample from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Continuous Univariate Student's T-distribution. + Implements the univariate Student t-distribution. For details about this + distribution, see + + Wikipedia - Student's t-distribution. + + We use a slightly generalized version (compared to + Wikipedia) of the Student t-distribution. Namely, one which also + parameterizes the location and scale. See the book "Bayesian Data + Analysis" by Gelman et al. for more details. + The density of the Student t-distribution p(x|mu,scale,dof) = + Gamma((dof+1)/2) (1 + (x - mu)^2 / (scale * scale * dof))^(-(dof+1)/2) / + (Gamma(dof/2)*Sqrt(dof*pi*scale)). + The distribution will use the by + default. Users can get/set the random number generator by using the + property. + The statistics classes will check all the incoming parameters + whether they are in the allowed range. This might involve heavy + computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the StudentT class. This is a Student t-distribution with location 0.0 + scale 1.0 and degrees of freedom 1. + + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Gets the location (μ) of the Student t-distribution. + + + + + Gets the scale (σ) of the Student t-distribution. Range: σ > 0. + + + + + Gets the degrees of freedom (ν) of the Student t-distribution. Range: ν > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Student t-distribution. + + + + + Gets the variance of the Student t-distribution. 
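The Student-t entry above quotes the generalized density with location and scale parameters. A sketch that evaluates that density directly (with the scale factor written outside the square root) and compares it against the library, assuming SpecialFunctions.Gamma and the static StudentT.PDF(location, scale, dof, x) overload documented further below:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Distributions;

    class StudentTSketch
    {
        static void Main()
        {
            double mu = 1.0, scale = 2.0, dof = 5.0, x = 0.3;

            // p(x|μ,σ,ν) = Γ((ν+1)/2) (1 + ((x−μ)/σ)²/ν)^(−(ν+1)/2) / (Γ(ν/2) √(νπ) σ)
            double byFormula =
                SpecialFunctions.Gamma((dof + 1.0) / 2.0)
                * Math.Pow(1.0 + (x - mu) * (x - mu) / (scale * scale * dof), -(dof + 1.0) / 2.0)
                / (SpecialFunctions.Gamma(dof / 2.0) * Math.Sqrt(dof * Math.PI) * scale);

            double byLibrary = StudentT.PDF(mu, scale, dof, x);
            Console.WriteLine($"{byFormula} vs {byLibrary}"); // the two values should agree
        }
    }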
+ + + + + Gets the standard deviation of the Student t-distribution. + + + + + Gets the entropy of the Student t-distribution. + + + + + Gets the skewness of the Student t-distribution. + + + + + Gets the mode of the Student t-distribution. + + + + + Gets the median of the Student t-distribution. + + + + + Gets the minimum of the Student t-distribution. + + + + + Gets the maximum of the Student t-distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Samples student-t distributed random variables. + + The algorithm is method 2 in section 5, chapter 9 + in L. Devroye's "Non-Uniform Random Variate Generation" + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a random number from the standard student-t distribution. + + + + Generates a sample from the Student t-distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Student t-distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the inverse cumulative density at . 
+ + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Student t-distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Student t-distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Triangular distribution. + For details, see Wikipedia - Triangular distribution. + + The distribution will use the by default. + Users can get/set the random number generator by using the property. + The statistics classes will check whether all the incoming parameters are in the allowed range. This might involve heavy computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The random number generator which is used to draw random samples. + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + + + + Gets the lower bound of the distribution. 
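A sketch of the triangular parameterisation introduced above, assuming the MathNet.Numerics 3.x constructor order Triangular(lower, upper, mode) and the instance members Density, CumulativeDistribution, InverseCumulativeDistribution and Sample documented just below:

    using System;
    using MathNet.Numerics.Distributions;

    class TriangularSketch
    {
        static void Main()
        {
            // Arguments are (lower, upper, mode); the constructor throws
            // if lower ≤ mode ≤ upper does not hold.
            var tri = new Triangular(0.0, 10.0, 3.0);

            Console.WriteLine(tri.Density(3.0));                       // PDF peaks at the mode
            Console.WriteLine(tri.CumulativeDistribution(3.0));        // P(X ≤ mode) = (3 − 0)/(10 − 0) = 0.3
            Console.WriteLine(tri.InverseCumulativeDistribution(0.9)); // 90th percentile
            Console.WriteLine(tri.Sample());
        }
    }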
+ + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. 
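A comparable sketch for the Triangular members above; lower = 0, upper = 10 and mode = 2 are assumed example values satisfying lower ≤ mode ≤ upper.

    using System;
    using MathNet.Numerics.Distributions;

    static class TriangularSketch
    {
        static void Main()
        {
            // Lower bound 0, upper bound 10, mode 2.
            var tri = new Triangular(0.0, 10.0, 2.0);
            Console.WriteLine(tri.Mean);                             // (lower + upper + mode) / 3 = 4
            Console.WriteLine(tri.Density(2.0));                     // PDF peaks at the mode

            // The static overloads mirror the instance members.
            Console.WriteLine(Triangular.CDF(0.0, 10.0, 2.0, 5.0));  // P(X <= 5)
            Console.WriteLine(Triangular.InvCDF(0.0, 10.0, 2.0, 0.5));
            Console.WriteLine(Triangular.Sample(0.0, 10.0, 2.0));    // single draw, default RNG
        }
    }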
+ + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. 
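Going back to the Weibull members documented just above, a hedged sketch. The shape and scale values are invented, and the single-argument Estimate call assumes the overload that accepts only the sample sequence (the Qiao/Tsokos parameter estimation mentioned above).

    using System;
    using System.Linq;
    using MathNet.Numerics.Distributions;

    static class WeibullSketch
    {
        static void Main()
        {
            // Shape k = 1.5, scale lambda = 2.0 (illustrative values).
            var w = new Weibull(1.5, 2.0);
            Console.WriteLine(w.Mean);
            Console.WriteLine(Weibull.CDF(1.5, 2.0, 1.0));   // P(X <= 1)

            // Draw samples and fit a new Weibull back to them
            // (assumes Estimate accepts the samples directly).
            double[] data = w.Samples().Take(1000).ToArray();
            Weibull fitted = Weibull.Estimate(data);
            Console.WriteLine(fitted.Shape + " " + fitted.Scale);
        }
    }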
+ + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. + + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. 
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. 
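A sketch for the Zipf members above; s = 1.1 over n = 100 elements is an assumed example parameterization.

    using System;
    using MathNet.Numerics.Distributions;

    static class ZipfSketch
    {
        static void Main()
        {
            // Exponent s = 1.1, n = 100 elements (illustrative only).
            var zipf = new Zipf(1.1, 100);

            Console.WriteLine(zipf.Probability(1));              // PMF at k = 1, the most likely rank
            Console.WriteLine(zipf.CumulativeDistribution(10));  // P(X <= 10)
            Console.WriteLine(zipf.Mode);                        // 1 for any Zipf distribution
            Console.WriteLine(zipf.Sample());                    // one random rank
        }
    }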
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. 
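The integer number theory helpers above are plain static methods on the Euclid class; a short sketch in which all inputs are arbitrary example numbers.

    using System;
    using MathNet.Numerics;

    static class EuclidSketch
    {
        static void Main()
        {
            Console.WriteLine(Euclid.GreatestCommonDivisor(45, 18));  // 9
            Console.WriteLine(Euclid.LeastCommonMultiple(4, 6));      // 12

            // Canonical modulus follows the divisor's sign, remainder the dividend's.
            Console.WriteLine(Euclid.Modulus(-3, 5));                 // 2
            Console.WriteLine(Euclid.Remainder(-3, 5));               // -3

            Console.WriteLine(Euclid.IsPowerOfTwo(64));               // True
            Console.WriteLine(Euclid.CeilingToPowerOfTwo(100));       // 128
            Console.WriteLine(Euclid.Log2(1024));                     // 10
        }
    }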
+ Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend to use them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occured calling native provider function. + + + + + An error occured calling native provider function. + + + + + Native provider was unable to allocate sufficent memory. + + + + + Native provider failed LU inversion do to a singular U matrix. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return 0) + and then dividing the total by the number of gain periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. (The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). 
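The worked extended-GCD example above still uses an older class name (Fn). A sketch of the same identity, 45·x + 18·y = gcd(45, 18), against the Euclid class; the method name and out-parameter order are assumed to match the current API.

    using System;
    using MathNet.Numerics;

    static class ExtendedGcdSketch
    {
        static void Main()
        {
            long x, y;
            long d = Euclid.ExtendedGreatestCommonDivisor(45, 18, out x, out y);

            // Expected per the worked example above: d == 9, x == 1, y == -2,
            // since 1*45 + (-2)*18 = 9.
            Console.WriteLine(d + " " + x + " " + y);
        }
    }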
+ + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. 
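Assuming the root-finding entries above correspond to the FindRoots class, a small sketch; the bracketing interval and polynomial coefficients are made-up examples. Note that the quadratic solver takes its coefficients in ascending order of exponent, as stated above.

    using System;
    using MathNet.Numerics;

    static class RootsSketch
    {
        static void Main()
        {
            // Root of x^2 - 2 inside the bracket [0, 2]: about 1.41421.
            double root = FindRoots.OfFunction(x => x * x - 2.0, 0.0, 2.0);
            Console.WriteLine(root);

            // Both (complex) roots of 2 - 3x + x^2, coefficients (c, b, a) = (2, -3, 1).
            var quadratic = FindRoots.Quadratic(2.0, -3.0, 1.0);
            Console.WriteLine(quadratic);   // expected roots: 1 and 2
        }
    }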
+ A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. 
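A sketch of the least-squares fitting entries above. The sample data is invented, and Evaluate.Polynomial is used for the ascending coefficient order, as the documentation itself suggests.

    using System;
    using MathNet.Numerics;

    static class FitSketch
    {
        static void Main()
        {
            double[] x = { 1, 2, 3, 4, 5 };
            double[] y = { 2.1, 3.9, 6.2, 7.8, 10.1 };

            // Straight line y = a + b*x; Item1 is the intercept a, Item2 the slope b.
            Tuple<double, double> line = Fit.Line(x, y);
            Console.WriteLine(line.Item1 + " + " + line.Item2 + "*x");

            // Second-order polynomial, coefficients [p0, p1, p2] ascending by exponent.
            double[] p = Fit.Polynomial(x, y, 2);
            Console.WriteLine(Evaluate.Polynomial(3.0, p));   // evaluate the fit at x = 3

            // Arbitrary linear combination: y ~ p0*1 + p1*sin(x).
            double[] q = Fit.LinearCombination(x, y, t => 1.0, t => Math.Sin(t));
            Console.WriteLine(q[0] + " " + q[1]);
        }
    }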
+ + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. 
+ + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. 
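The signal generators above combine roughly as follows; the lengths, rates and amplitudes are arbitrary example values, and the parameter lists are assumed from the summaries above.

    using System;
    using MathNet.Numerics;

    static class GenerateSketch
    {
        static void Main()
        {
            // 101 equally spaced points on [0, 1], MATLAB linspace style.
            double[] t = Generate.LinearSpaced(101, 0.0, 1.0);

            // Sample an arbitrary function at those points.
            double[] ramp = Generate.Map(t, v => 2.0 * v + 1.0);

            // 64 samples of a 5 Hz sine sampled at 100 Hz, amplitude 1.
            double[] sine = Generate.Sinusoidal(64, 100.0, 5.0, 1.0);

            // Square wave: 10 samples high (+1), 10 samples low (-1), repeated.
            double[] square = Generate.Square(64, 10, 10, -1.0, 1.0);

            // Kronecker delta of amplitude 1 at sample index 8.
            double[] impulse = Generate.Impulse(64, 1.0, 8);

            Console.WriteLine(ramp[50] + " " + sine[0] + " " + square[0] + " " + impulse[8]);
        }
    }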
+ + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. 
+ + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + The parsed double number using the current culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + The parsed float number using the current culture information. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. 
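The goodness-of-fit entries above (R squared being the squared Pearson product-moment correlation) pair naturally with the fitting routines; a sketch with invented data.

    using System;
    using MathNet.Numerics;

    static class GoodnessOfFitSketch
    {
        static void Main()
        {
            double[] x = { 1, 2, 3, 4, 5 };
            double[] observed = { 2.1, 3.9, 6.2, 7.8, 10.1 };

            // Best-fit line as a callable function, then compare its predictions
            // against the observed values.
            Func<double, double> line = Fit.LineFunc(x, observed);
            double[] predicted = Generate.Map(x, line);

            Console.WriteLine(GoodnessOfFit.RSquared(predicted, observed));  // coefficient of determination
            Console.WriteLine(GoodnessOfFit.R(predicted, observed));         // correlation coefficient
        }
    }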
+ + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. 
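A sketch of the in-place forward and inverse FFT entries above; the eight-sample buffer and the MATLAB scaling convention are illustrative choices.

    using System;
    using System.Numerics;
    using MathNet.Numerics.IntegralTransforms;

    static class FourierSketch
    {
        static void Main()
        {
            // One cycle of a sine across eight complex samples.
            var samples = new Complex[8];
            for (int i = 0; i < samples.Length; i++)
                samples[i] = new Complex(Math.Sin(2.0 * Math.PI * i / 8.0), 0.0);

            Fourier.Forward(samples, FourierOptions.Matlab);               // in-place FFT, no forward scaling
            double[] freq = Fourier.FrequencyScale(samples.Length, 8.0);   // bin frequencies at 8 Hz sampling

            Console.WriteLine(samples[1].Magnitude + " at " + freq[1] + " Hz");

            Fourier.Inverse(samples, FourierOptions.Matlab);               // back to the time domain (1/N scaling)
        }
    }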
+ Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). [= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). 
+ + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Approximation of the finite integral in the given interval. + + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. 
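The top-level quadrature entries above reduce to one-line calls; the integral of sin x over [0, pi], which equals 2, is used as a self-checking example.

    using System;
    using MathNet.Numerics;

    static class IntegrateSketch
    {
        static void Main()
        {
            // Adaptive double-exponential quadrature on a closed interval.
            double a = Integrate.OnClosedInterval(Math.Sin, 0.0, Math.PI);       // = 2
            double b = Integrate.OnClosedInterval(x => x * x, 0.0, 1.0, 1e-10);  // = 1/3, with a target accuracy

            Console.WriteLine(a + " " + b);
        }
    }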
+ + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. + + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. 
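The Gauss-Legendre rule described above offers both static one-shot integration and an object exposing its abscissas and weights; a sketch with assumed orders 32 and 5.

    using System;
    using MathNet.Numerics.Integration;

    static class GaussLegendreSketch
    {
        static void Main()
        {
            // One-dimensional: integral of exp(-x^2) over [-3, 3], close to sqrt(pi).
            double oneD = GaussLegendreRule.Integrate(x => Math.Exp(-x * x), -3.0, 3.0, 32);

            // Two-dimensional: integral of x*y over the rectangle [0,1] x [0,1] = 0.25.
            double twoD = GaussLegendreRule.Integrate((x, y) => x * y, 0.0, 1.0, 0.0, 1.0, 5);

            // Inspect the rule itself: abscissas and weights on [-1, 1].
            var rule = new GaussLegendreRule(-1.0, 1.0, 5);
            Console.WriteLine(rule.Order + " points, first node " + rule.Abscissas[0]);

            Console.WriteLine(oneD + " " + twoD);
        }
    }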
+ + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. 
+ First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. + Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. 
+ + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. 
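A short sketch of the factory pattern described above, assuming the MathNet.Numerics Interpolate factory class these entries belong to; each factory returns an IInterpolation scheme that is then evaluated at arbitrary points:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Interpolation;

    static class InterpolationFactorySketch
    {
        static void Main()
        {
            double[] t = { 0.0, 1.0, 2.0, 3.0, 4.0 };
            double[] x = { 0.0, 1.0, 4.0, 9.0, 16.0 };   // samples of x(t) = t^2

            // Factory methods return an IInterpolation optimized for the given samples.
            IInterpolation linear = Interpolate.Linear(t, x);
            IInterpolation spline = Interpolate.CubicSpline(t, x);

            Console.WriteLine(linear.Interpolate(2.5));   // piecewise linear between (2,4) and (3,9): 6.5
            Console.WriteLine(spline.Interpolate(2.5));   // natural cubic spline, close to 6.25
        }
    }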
+ + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. 
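For the pole-free Floater-Hormann scheme above, a minimal sketch assuming the Interpolate.RationalWithoutPoles factory referenced earlier in these entries; note that, as documented, the barycentric algorithms support neither differentiation nor integration:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Interpolation;

    static class BarycentricSketch
    {
        static void Main()
        {
            // Runge's function on non-equidistant points: a classic case where high-order
            // polynomial interpolation oscillates but the pole-free rational scheme behaves well.
            double[] t = { -1.0, -0.6, -0.2, 0.0, 0.3, 0.7, 1.0 };
            double[] x = new double[t.Length];
            for (int i = 0; i < t.Length; i++)
            {
                x[i] = 1.0 / (1.0 + 25.0 * t[i] * t[i]);
            }

            IInterpolation rational = Interpolate.RationalWithoutPoles(t, x);

            Console.WriteLine(rational.Interpolate(0.5));
            Console.WriteLine(rational.SupportsDifferentiation);   // False, as documented above
            Console.WriteLine(rational.SupportsIntegration);       // False, as documented above
        }
    }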
+ + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. 
+ Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. 
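The spline entries above reference the *Sorted factories (CubicSpline.InterpolateNaturalSorted, CubicSpline.InterpolateAkimaSorted, LinearSpline.InterpolateSorted); a sketch of how they are typically used when the sample arrays are already sorted, including the differentiation and integration both spline types support:

    using System;
    using MathNet.Numerics.Interpolation;

    static class SplineSketch
    {
        static void Main()
        {
            // Already-sorted sample arrays, so the *Sorted factories can be used directly.
            double[] t = { 0.0, 1.0, 2.0, 3.0, 4.0 };
            double[] x = { 0.0, 1.0, 4.0, 9.0, 16.0 };

            CubicSpline natural = CubicSpline.InterpolateNaturalSorted(t, x);
            CubicSpline akima = CubicSpline.InterpolateAkimaSorted(t, x);   // needs at least 5 points
            LinearSpline line = LinearSpline.InterpolateSorted(t, x);

            // Cubic and linear splines support both differentiation and integration.
            Console.WriteLine(natural.Interpolate(2.5));
            Console.WriteLine(akima.Interpolate(2.5));
            Console.WriteLine(natural.Differentiate(2.5));
            Console.WriteLine(natural.Integrate(0.0, 4.0));
            Console.WriteLine(line.Integrate(0.0, 4.0));     // trapezoidal area under the polyline: 22
        }
    }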
+ + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. + + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). 
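A sketch of the Neville polynomial interpolation documented above, using the NevillePolynomialInterpolation.InterpolateSorted factory referenced earlier; differentiation is supported, integration is not:

    using System;
    using MathNet.Numerics.Interpolation;

    static class NevilleSketch
    {
        static void Main()
        {
            // Four samples of x(t) = t^3; a degree-3 interpolating polynomial reproduces it exactly.
            double[] t = { 0.0, 1.0, 2.0, 3.0 };
            double[] x = { 0.0, 1.0, 8.0, 27.0 };

            var neville = NevillePolynomialInterpolation.InterpolateSorted(t, x);

            Console.WriteLine(neville.Interpolate(1.5));     // 1.5^3 = 3.375
            Console.WriteLine(neville.Differentiate(1.5));   // 3 * 1.5^2 = 6.75
            Console.WriteLine(neville.SupportsIntegration);  // False, as documented above
        }
    }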
+ + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Left and right boundary conditions. + + + + + Natural Boundary (Zero second derivative). + + + + + Parabolically Terminated boundary. + + + + + Fixed first derivative at the boundary. + + + + + Fixed second derivative at the boundary. + + + + + A step function where the start of each segment is included, and the last segment is open-ended. + Segment i is [x_i, x_i+1) for i < N, or [x_i, infinity] for i = N. + The domain of the function is all real numbers, such that y = 0 where x <. + + Supports both differentiation and integration. + + + Sample points (N), sorted ascending + Samples values (N) of each segment starting at the corresponding sample point. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t. + + + + + Wraps an interpolation with a transformation of the interpolated values. + + Neither differentiation nor integration is supported. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. 
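A sketch of the step interpolation documented above, using the StepInterpolation.InterpolateSorted factory referenced earlier; it illustrates the left-closed segments, the open-ended last segment, and the zero value below the first sample point:

    using System;
    using MathNet.Numerics.Interpolation;

    static class StepSketch
    {
        static void Main()
        {
            // Each segment takes the value at its left endpoint; the last segment is open-ended.
            double[] t = { 0.0, 1.0, 2.0 };
            double[] x = { 10.0, 20.0, 30.0 };

            var step = StepInterpolation.InterpolateSorted(t, x);

            Console.WriteLine(step.Interpolate(0.5));    // 10
            Console.WriteLine(step.Interpolate(1.0));    // 20 (segment start is included)
            Console.WriteLine(step.Interpolate(5.0));    // 30 (last segment is open-ended)
            Console.WriteLine(step.Interpolate(-1.0));   // 0  (below the first sample point)
        }
    }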
+ + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
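A minimal sketch of the DenseMatrix construction paths documented above (copy from a two-dimensional array, initialize from a function), assuming the MathNet.Numerics.LinearAlgebra.Double namespace:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DenseMatrixSketch
    {
        static void Main()
        {
            // Copy-construct from a 2D array (independent memory; column-major storage underneath).
            var a = DenseMatrix.OfArray(new double[,]
            {
                { 1.0, 2.0 },
                { 3.0, 4.0 }
            });

            // Initialize each value from a (row, column) init function: here a 2x2 identity.
            var b = DenseMatrix.Create(2, 2, (i, j) => i == j ? 1.0 : 0.0);

            Matrix<double> c = a * b + a.Transpose();
            Console.WriteLine(c.ToString());
        }
    }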
+ + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. 
+ The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. 
It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. 
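A sketch of the DenseVector arithmetic and norm members documented above (dot product, L1/L2/infinity norms, pointwise operations), again assuming the MathNet.Numerics.LinearAlgebra.Double types:

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DenseVectorSketch
    {
        static void Main()
        {
            var u = DenseVector.OfArray(new[] { 1.0, -2.0, 2.0 });
            var v = DenseVector.OfArray(new[] { 3.0, 0.0, 4.0 });

            Console.WriteLine(u + v);                    // element-wise sum
            Console.WriteLine(u.DotProduct(v));          // 1*3 + (-2)*0 + 2*4 = 11
            Console.WriteLine(u.L1Norm());               // |1| + |-2| + |2| = 5
            Console.WriteLine(u.L2Norm());               // sqrt(1 + 4 + 4) = 3
            Console.WriteLine(u.InfinityNorm());         // max absolute value = 2
            Console.WriteLine(u.PointwiseMultiply(v));   // (3, 0, 8)
        }
    }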
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use, + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a double dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. 
+ Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. 
+ + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. 
+ The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. 
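The eigenvalue decomposition described above (A = V*D*V' for symmetric A, block diagonal D otherwise, with A*V = V*D) can be exercised roughly as follows. This is a sketch against MathNet.Numerics 3.x; the Evd() method and the EigenValues, EigenVectors and D members are assumed names, and EigenValues comes back as a complex vector even when every imaginary part is zero.

using System;
using MathNet.Numerics.LinearAlgebra;

class EvdExample
{
    static void Main()
    {
        // Symmetric matrix, so the eigenvalues are real and V is orthogonal.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2.0, 1.0 },
            { 1.0, 2.0 }
        });

        var evd = a.Evd();                    // decomposition is computed here and cached
        Console.WriteLine(evd.EigenValues);   // complex vector of eigenvalues
        Console.WriteLine(evd.EigenVectors);  // matrix V, one eigenvector per column

        // Sanity check of A*V = V*D; the Frobenius norm of the difference should be ~0.
        var residual = a * evd.EigenVectors - evd.EigenVectors * evd.D;
        Console.WriteLine(residual.FrobeniusNorm());
    }
}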
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. 
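For the singular value decomposition described just above (M = U*Σ*VT with the singular values ordered descending), here is a minimal sketch of computing it and reading back the quantities this file documents (singular values, rank, condition number), plus a least-squares solve. The member names (Svd, S, Rank, ConditionNumber, Solve) are my assumptions about the MathNet.Numerics 3.x surface and should be checked.

using System;
using MathNet.Numerics.LinearAlgebra;

class SvdExample
{
    static void Main()
    {
        // Overdetermined 3x2 system.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 3.0, 1.0 },
            { 1.0, 3.0 },
            { 0.0, 2.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        var svd = a.Svd();                      // U, Σ and VT are computed at construction time
        Console.WriteLine(svd.S);               // singular values, descending
        Console.WriteLine(svd.Rank);            // number of non-negligible singular values
        Console.WriteLine(svd.ConditionNumber); // max(S) / min(S)

        var x = svd.Solve(b);                   // least-squares solution of A*x = b
        Console.WriteLine(x);
    }
}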
+ + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + Matrix V is encoded in the property EigenVectors in the way that: + - column corresponding to real eigenvalue represents real eigenvector, + - columns corresponding to the pair of complex conjugate eigenvalues + lambda[i] and lambda[i+1] encode real and imaginary parts of eigenvectors. + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. 
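The Householder QR factorization documented above (A = QR with Q orthogonal and R upper triangular) is most often used for least-squares problems. A hedged sketch, assuming the QR() method and the Q, R and Solve members exist under those names in the referenced MathNet.Numerics build:

using System;
using MathNet.Numerics.LinearAlgebra;

class QrExample
{
    static void Main()
    {
        // Fit y = c0 + c1*t through three points in the least-squares sense.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 1.0, 1.0 },
            { 1.0, 2.0 },
            { 1.0, 3.0 }
        });
        var y = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 2.5 });

        var qr = a.QR();            // Householder QR, computed at construction time
        var coeffs = qr.Solve(y);   // least-squares solution of A*c = y
        Console.WriteLine(qr.R);    // upper triangular factor
        Console.WriteLine(coeffs);
    }
}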
+ If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. 
+ + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
+ + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + double version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . 
+ If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. 
+ The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
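The remarks above say example code should follow, but none is carried in this file. A hedged sketch of driving the BiCgStab solver through the five-argument Solve(matrix, input, result, iterator, preconditioner) overload documented below; the Iterator, IterationCountStopCriterion, ResidualStopCriterion and DiagonalPreconditioner type names are my assumptions about the MathNet.Numerics 3.x namespaces, so confirm them before relying on this.

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class BiCgStabExample
{
    // Solves A*x = b with BiCgStab and returns x.
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);   // result vector, filled in by the solver

        // Stop after 1000 iterations or once the residual norm falls below 1e-10.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new BiCgStab();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}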
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
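As in the BiCgStab sketch earlier, and again only as a hedged illustration because this file ships no example code: the GPBiCG solver is driven through the same Solve(matrix, input, result, iterator, preconditioner) pattern documented here. The GpBiCg class name and the switching-step properties mentioned above are assumptions about the MathNet.Numerics 3.x API; the switching thresholds are left at their defaults.

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class GpBiCgExample
{
    // Solves A*x = b with GPBiCG, which alternates internally between BiCGStab and GPBiCG steps.
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        new GpBiCg().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}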
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
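A sketch of pairing the ILU(0) preconditioner described here with one of the Krylov solvers above, in place of the diagonal preconditioner used in the earlier sketches. The ILU0Preconditioner type name is purely an assumption on my part (this file only describes the algorithm), so check the actual class exposed by the referenced MathNet.Numerics build; ILU(0) pays off mainly on sparse systems.

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class Ilu0Example
{
    // Same solve pattern as the BiCgStab sketch above, but preconditioned with ILU(0).
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var preconditioner = new ILU0Preconditioner();   // hypothetical type name, see note above
        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        return x;
    }
}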
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
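Once more a hedged sketch standing in for the example code the remarks above refer to: ML(k)-BiCGStab follows the same Solve(matrix, input, result, iterator, preconditioner) pattern as the other iterative solvers in this file. The MlkBiCgStab class name is assumed; the number of starting vectors mentioned above can presumably be adjusted through a property on the solver, which is left at its default here.

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class MlkBiCgStabExample
{
    // Solves A*x = b with ML(k)-BiCGStab using the default number of starting vectors.
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        new MlkBiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}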
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
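And the same pattern for the TFQMR solver described above, again only as a hedged sketch in place of the example code the remarks promise; the TFQMR class name and the surrounding helper types are assumptions to verify against the referenced assembly.

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class TfqmrExample
{
    // Solves A*x = b with the Transpose Free Quasi-Minimal Residual solver.
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}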
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
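The warning above about adding a non-zero scalar to a sparse vector is worth seeing concretely. A sketch assuming the OfIndexedEnumerable factory and the NonZerosCount property that these entries describe; the length and values are invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class SparseVectorSketch
    {
        static void Main()
        {
            // 1000 slots, only two of them non-zero; only those two are stored.
            var v = SparseVector.OfIndexedEnumerable(1000,
                new[] { Tuple.Create(10, 1.5), Tuple.Create(500, -2.0) });

            Console.WriteLine(v.NonZerosCount); // 2

            // Adding a non-zero scalar touches every slot, so the result is a
            // 100% filled "sparse" vector; prefer a dense vector for that.
            var filled = v.Add(3.0);
            Console.WriteLine(filled[0]); // 3 - even previously-zero slots now hold a value
        }
    }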
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + double version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
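A short sketch of the reduction members listed above (Sum, the L1 and infinity norms, the p-norm, and the index helpers), assuming the usual MathNet.Numerics 3.x names for them; the values are invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class VectorReductionSketch
    {
        static void Main()
        {
            var v = DenseVector.OfArray(new[] { 3.0, -7.0, 2.0, 5.0 });

            Console.WriteLine(v.Sum());                  //  3
            Console.WriteLine(v.L1Norm());               // 17  (sum of absolute values)
            Console.WriteLine(v.InfinityNorm());         //  7  (largest absolute value)
            Console.WriteLine(v.Norm(3));                // (|3|^3 + |-7|^3 + |2|^3 + |5|^3)^(1/3)
            Console.WriteLine(v.AbsoluteMaximumIndex()); //  1  (the -7 entry)
            Console.WriteLine(v.MaximumIndex());         //  3  (the 5 entry)
        }
    }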
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
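The sign conventions above (the canonical modulus follows the divisor, the remainder follows the dividend) are a common source of confusion. A sketch assuming the public Modulus/Remainder helpers these entries describe; the values are invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class ModulusRemainderSketch
    {
        static void Main()
        {
            var v = DenseVector.OfArray(new[] { 7.0, -7.0 });

            // Canonical modulus: each result takes the sign of the divisor.
            Console.WriteLine(v.Modulus(3.0));   //  1,  2
            Console.WriteLine(v.Modulus(-3.0));  // -2, -1

            // Remainder (the % operator): each result takes the sign of the dividend.
            Console.WriteLine(v.Remainder(3.0)); //  1, -1
        }
    }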
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
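The Of* creation helpers above differ mainly in how the source data is laid out. A sketch of the most common ones, assuming the OfArray/OfColumnArrays/OfRowArrays names from MathNet.Numerics 3.x; the values are invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DenseMatrixBuildSketch
    {
        static void Main()
        {
            // From a 2-D array (copied into column-major storage internally).
            var a = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });

            // From column arrays: each array becomes one column.
            var byColumns = DenseMatrix.OfColumnArrays(new[] { 1.0, 3.0 }, new[] { 2.0, 4.0 });

            // From row arrays: each array becomes one row.
            var byRows = DenseMatrix.OfRowArrays(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 });

            // All three hold the same values.
            Console.WriteLine(a.Equals(byColumns) && a.Equals(byRows)); // True
        }
    }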
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. 
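The entries above for multiplying by a transpose avoid forming the transposed matrix explicitly; a typical use is building the normal-equations matrix AᵀA. A sketch assuming these map to the TransposeThisAndMultiply/Transpose methods of MathNet.Numerics 3.x, with invented data.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class NormalEquationsSketch
    {
        static void Main()
        {
            var a = DenseMatrix.OfArray(new double[,] { { 1, 0 }, { 1, 1 }, { 1, 2 } });
            var b = DenseVector.OfArray(new[] { 1.0, 2.0, 4.0 });

            // A'A and A'b without materialising the transpose as a separate matrix.
            var ata = a.TransposeThisAndMultiply(a);
            var atb = a.TransposeThisAndMultiply(b);

            Console.WriteLine(ata.Equals(a.Transpose() * a)); // True - same values, just cheaper
            Console.WriteLine(ata.Solve(atb));                // least-squares fit of y = c0 + c1*x
        }
    }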
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. 
+ All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
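The raw-array constructor above binds the vector to the caller's array without copying, while OfArray allocates its own storage. A sketch of the difference, plus the dot product member; the values are invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DenseVectorBindingSketch
    {
        static void Main()
        {
            var data = new[] { 1.0, 2.0, 3.0 };

            var bound = new DenseVector(data);       // wraps the array, no copy
            var copied = DenseVector.OfArray(data);  // allocates its own storage

            data[0] = 99.0;

            Console.WriteLine(bound[0]);   // 99 - shares the caller's array
            Console.WriteLine(copied[0]);  //  1 - unaffected

            Console.WriteLine(bound.DotProduct(copied)); // 99*1 + 2*2 + 3*3 = 112
        }
    }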
+ + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a float dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. 
+ + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. 
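As the class summary above states, a diagonal matrix stores only its diagonal, silently ignores off-diagonal writes of 0.0 or NaN, and throws for anything else. A sketch assuming the (rows, columns, diagonal-array) constructor described above; the exception type is not named in these docs, so the sketch catches the base Exception.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DiagonalMatrixSketch
    {
        static void Main()
        {
            // Binds directly to the three diagonal entries; storage is O(n), not O(n^2).
            var d = new DiagonalMatrix(3, 3, new[] { 2.0, 4.0, 8.0 });

            d[1, 1] = 5.0;  // allowed: on the diagonal
            d[0, 1] = 0.0;  // allowed: writing zero off the diagonal changes nothing

            try
            {
                d[0, 1] = 1.0;  // cannot be represented in diagonal storage
            }
            catch (Exception e)
            {
                // The library rejects off-diagonal writes, as the summary above states.
                Console.WriteLine(e.GetType().Name);
            }
        }
    }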
+ + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. 
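For a diagonal matrix the quantities above reduce to simple functions of the diagonal entries, which is what makes the type cheap. A sketch assuming the usual MathNet.Numerics member names (Determinant, L2Norm, ConditionNumber, Inverse, Diagonal); the values are invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class DiagonalPropertiesSketch
    {
        static void Main()
        {
            var d = new DiagonalMatrix(3, 3, new[] { 2.0, 4.0, 8.0 });

            Console.WriteLine(d.Determinant());     // 64  (product of the diagonal)
            Console.WriteLine(d.L2Norm());          //  8  (largest singular value)
            Console.WriteLine(d.ConditionNumber()); //  4  (max/min singular value = 8/2)
            Console.WriteLine(d.Inverse());         // diag(0.5, 0.25, 0.125)
            Console.WriteLine(d.Diagonal());        // the diagonal as a vector: 2, 4, 8
        }
    }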
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. 
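The Cholesky type above factors a symmetric, positive definite A into L*L' once, at construction time, and the cached factor can then solve any number of right-hand sides. A sketch assuming the Cholesky()/Solve() entry points of MathNet.Numerics 3.x; the data is invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class CholeskySketch
    {
        static void Main()
        {
            // A symmetric, positive definite matrix.
            var a = DenseMatrix.OfArray(new double[,]
            {
                { 4.0, 2.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 }
            });
            var b = DenseVector.OfArray(new[] { 6.0, 8.0, 4.0 });

            var cholesky = a.Cholesky();   // throws if A is not symmetric positive definite
            var x = cholesky.Solve(b);     // solves A*x = b via the cached factor

            Console.WriteLine(x);
            Console.WriteLine((a * x - b).L2Norm());  // ~0, up to rounding
            Console.WriteLine(cholesky.Determinant);  // same value as a.Determinant()
        }
    }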
+ + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. 
+ + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. 
+ If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. 
+ + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
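The factorization classes in this block all follow the same pattern: factor once, then call Solve for one or more right-hand sides. A sketch comparing QR and SVD on an overdetermined system, assuming the QR()/Svd() entry points and the Rank/ConditionNumber properties documented above; the data is invented.

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class FactorizationSolveSketch
    {
        static void Main()
        {
            // 4 equations, 2 unknowns: an overdetermined least-squares problem.
            var a = DenseMatrix.OfArray(new double[,]
            {
                { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 }
            });
            var b = DenseVector.OfArray(new[] { 1.0, 2.0, 2.0, 4.0 });

            var qr = a.QR();
            var svd = a.Svd();

            Console.WriteLine(qr.Solve(b));         // least-squares solution
            Console.WriteLine(svd.Solve(b));        // same solution via the SVD
            Console.WriteLine(svd.Rank);            // 2 - effective numerical rank
            Console.WriteLine(svd.ConditionNumber); // max(S) / min(S)
        }
    }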
+ + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. 
+ The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. 
+ The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
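The example code referred to just above is not reproduced in this hunk, so here is a minimal sketch of the usage implied by the Solve member documented below. It assumes the MathNet.Numerics 3.x types BiCgStab, Iterator<T>, IterationCountStopCriterion<T>, ResidualStopCriterion<T> and DiagonalPreconditioner; none of these names appear verbatim in the stripped text above:

    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class BiCgStabSketch
    {
        static void Main()
        {
            // Small non-symmetric sparse system A*x = b.
            var a = SparseMatrix.OfArray(new double[,]
            {
                { 4, 1, 0 },
                { 2, 5, 1 },
                { 0, 1, 3 }
            });
            var b = DenseVector.OfArray(new double[] { 1, 2, 3 });
            var x = new DenseVector(b.Count);

            // Stop after 1000 iterations or once the residual drops below 1e-10.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            // Solve(matrix, input, result, iterator, preconditioner), as documented below.
            new BiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());
            System.Console.WriteLine(x);
        }
    }

As the remarks above stress, the choice of preconditioner matters; the diagonal preconditioner is only a cheap default.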
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
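The hybrid GPBiCG solver is driven through the same Solve(matrix, input, result, iterator, preconditioner) member documented below. A short sketch, assuming the 3.x class name GpBiCg; the BiCGStab/GPBiCG switching intervals are configurable through the properties documented below and are left at their defaults here:

    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class GpBiCgSketch
    {
        // Same calling convention as the BiCgStab sketch earlier in this file.
        public static void Run(SparseMatrix a, DenseVector b, DenseVector x, Iterator<double> iterator)
        {
            new GpBiCg().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        }
    }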
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
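A short sketch of plugging the ILU(0) preconditioner into one of the Krylov solvers above, assuming the 3.x class name ILU0Preconditioner; the solver is expected to call Initialize with the coefficient matrix before iterating, per the Initialize member documented below:

    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class Ilu0Sketch
    {
        public static DenseVector Solve(SparseMatrix a, DenseVector b)
        {
            var x = new DenseVector(b.Count);
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8));

            // The combined L/U factors are built from the sparsity pattern of a
            // when the preconditioner is initialized.
            new BiCgStab().Solve(a, b, x, iterator, new ILU0Preconditioner());
            return x;
        }
    }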
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
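To round out the ILUTP description, the constructor documented above takes the fill level, the drop tolerance and the pivot tolerance, in that order. A hedged sketch, assuming the 3.x class name ILUTPPreconditioner and that argument order; the numeric values are purely illustrative:

    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    static class IlutpSketch
    {
        // Fill level, drop tolerance, pivot tolerance, in the documented order.
        public static ILUTPPreconditioner Create()
        {
            return new ILUTPPreconditioner(10.0, 1e-4, 0.5);
        }
    }

The returned preconditioner can be passed as the last argument of any of the Solve calls sketched earlier in this file.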
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
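ML(k)-BiCGStab is used through the same Solve member documented below; a short sketch assuming the 3.x class name MlkBiCgStab. The number of Lanczos starting vectors is configurable through the property documented below and is left at its default here:

    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class MlkBiCgStabSketch
    {
        public static void Run(SparseMatrix a, DenseVector b, DenseVector x)
        {
            // A single residual criterion is enough for a quick experiment.
            var iterator = new Iterator<double>(new ResidualStopCriterion<double>(1e-8));
            new MlkBiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        }
    }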
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
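TFQMR follows the same pattern; a short sketch assuming the 3.x class name TFQMR. Any of the preconditioners described earlier (diagonal, ILU(0), ILUTP, MILU(0)) can be supplied as the last argument:

    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class TfqmrSketch
    {
        public static DenseVector Solve(SparseMatrix a, DenseVector b)
        {
            var x = new DenseVector(b.Count);
            var iterator = new Iterator<double>(new ResidualStopCriterion<double>(1e-8));
            new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());
            return x;
        }
    }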
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
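A short sketch of a few of the creation paths listed above, assuming the MathNet.Numerics 3.x Matrix<double>.Build factory that forwards to these DenseMatrix constructors:

    // Sketch only; assumes the MathNet.Numerics 3.x Matrix<double>.Build factory.
    using System;
    using MathNet.Numerics.LinearAlgebra;

    class DenseMatrixCreationDemo
    {
        static void Main()
        {
            var M = Matrix<double>.Build;

            // Independent copy of a two-dimensional array.
            var a = M.DenseOfArray(new[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });

            // Each value initialized from an init function.
            var b = M.Dense(3, 3, (i, j) => i == j ? 1.0 : 0.0);

            // Raw column-major array bound directly, without copying:
            // changes to the array and the matrix affect each other.
            var data = new[] { 1.0, 2.0, 3.0, 4.0 };   // 2x2, column by column
            var c = M.Dense(2, 2, data);
            data[0] = 100.0;
            Console.WriteLine(c[0, 0]);                // 100, because c binds to data

            Console.WriteLine(a);
            Console.WriteLine(b);
        }
    }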
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. 
+ The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. 
+ + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
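The distinction drawn above between binding to a raw array and copying it, plus the basic vector arithmetic, can be sketched as follows (assuming the 3.x DenseVector constructor and static OfArray helper):

    // Sketch only; assumes the MathNet.Numerics 3.x DenseVector API.
    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class DenseVectorDemo
    {
        static void Main()
        {
            var raw = new[] { 1.0, 2.0, 3.0 };
            var bound = new DenseVector(raw);      // binds to 'raw' directly, no copy
            var copy = DenseVector.OfArray(raw);   // independent copy of 'raw'

            raw[0] = 10.0;
            Console.WriteLine(bound[0]);           // 10, shares storage with 'raw'
            Console.WriteLine(copy[0]);            // 1, unaffected

            var sum = bound + copy;                // element-wise addition
            Console.WriteLine(sum);
            Console.WriteLine(bound.DotProduct(copy));  // sum of a[i]*b[i]
        }
    }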
+ + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. 
The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. 
+ + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the Frobenius norm of this matrix. + The Frobenius norm of this matrix. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. 
+ + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. 
+ If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. 
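A minimal sketch of solving Ax = b through a cached QR factorization, as described above (assuming the 3.x Matrix<double>.QR() entry point):

    // Sketch only; assumes the MathNet.Numerics 3.x Matrix<double>.QR() entry point.
    using System;
    using MathNet.Numerics.LinearAlgebra;

    class QrSolveDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 1.0 },
                { 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

            // The factorization is computed once (Householder) and cached;
            // Solve reuses the cached Q and R.
            var qr = a.QR();
            var x = qr.Solve(b);

            Console.WriteLine(x);          // solution of Ax = b
            Console.WriteLine(a * x - b);  // residual, approximately zero
        }
    }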
+ + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. 
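A minimal sketch of the LU members described above (cached factorization, determinant, solve, inverse via LU), assuming the 3.x Matrix<double>.LU() entry point:

    // Sketch only; assumes the MathNet.Numerics 3.x Matrix<double>.LU() entry point.
    using System;
    using MathNet.Numerics.LinearAlgebra;

    class LuDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 2.0, 1.0 },
                { 1.0, 3.0 }
            });
            var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 1.0 });

            var lu = a.LU();                     // factorization computed once and cached
            Console.WriteLine(lu.Determinant);   // 2*3 - 1*1 = 5
            Console.WriteLine(lu.Solve(b));      // solution of Ax = b
            Console.WriteLine(lu.Inverse());     // inverse computed from L and U
        }
    }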
+ + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. 
The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. 
+ + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex value z1 + Complex value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. 
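A minimal sketch of the conjugate-transpose and Hermitian checks described above, assuming the generic 3.x builder also covers Matrix<Complex>:

    // Sketch only; assumes the generic MathNet.Numerics 3.x builder also
    // covers Matrix<Complex> (System.Numerics.Complex on .NET 4.5).
    using System;
    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    class ComplexMatrixDemo
    {
        static void Main()
        {
            var m = Matrix<Complex>.Build.DenseOfArray(new[,]
            {
                { new Complex(2, 0), new Complex(1, -1) },
                { new Complex(1, 1), new Complex(3, 0) }
            });

            var mh = m.ConjugateTranspose();              // conj(m) transposed
            Console.WriteLine(m.IsHermitian());           // True: m equals its conjugate transpose
            Console.WriteLine((m - mh).FrobeniusNorm());  // 0
        }
    }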
+ [Math.NET Numerics XML doc comments, Complex Matrix arithmetic: scalar and matrix add/subtract,
+  scalar multiplication and division (in both directions), matrix-vector and matrix-matrix
+  products including transpose and conjugate-transpose variants on either operand, pointwise
+  multiply, divide, power, canonical modulus and remainder (%), pointwise Exp and Log, the
+  Moore-Penrose pseudo-inverse, trace (square matrices only), induced L1, induced infinity and
+  entry-wise Frobenius norms, row/column p-norms and p-norm normalization, row/column value and
+  absolute-value sums, and an IsHermitian test.]
+ [BiCgStab - Bi-Conjugate Gradient Stabilized iterative solver, an 'improvement' of the standard
+  Conjugate Gradient solver that, unlike CG, also handles non-symmetric matrices; much of its
+  success depends on selecting a proper preconditioner. Algorithm taken from Barrett, Berry, Chan,
+  Demmel, Donato, Dongarra, Eijkhout, Pozo, Romine and van der Vorst, "Templates for the Solution
+  of Linear Systems: Building Blocks for Iterative Methods",
+  http://www.netlib.org/templates/Templates.html, Chapter 2, section 2.3.8, page 27.]
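The original comments reference example code for this solver that does not survive here. The following is a minimal C# sketch of one plausible use of the documented Solve(matrix, input, result, iterator, preconditioner) call; the type and namespace names (BiCgStab, Iterator&lt;T&gt;, IterationCountStopCriterion&lt;T&gt;, ResidualStopCriterion&lt;T&gt;, DiagonalPreconditioner) are assumed from Math.NET Numerics 3.x and are not part of the original text.

    // Sketch only: solve A x = b with the BiCgStab solver described above.
    // Double-precision types are used for brevity; verify names against the shipped MathNet.Numerics.dll.
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class BiCgStabExample
    {
        static void Main()
        {
            // A small non-symmetric sparse system.
            var A = SparseMatrix.OfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 }
            });
            var b = DenseVector.OfArray(new[] { 1.0, 2.0, 3.0 });
            var x = new DenseVector(b.Count);   // result vector, initialized to zero

            // The iterator decides when to stop; the preconditioner supplies the approximations.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            var solver = new BiCgStab();
            solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());

            System.Console.WriteLine(x.ToString());
        }
    }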
+ [BiCgStab members - CalculateTrueResidual (residual = b - Ax) and Solve(matrix, input, result,
+  iterator, preconditioner).]
+ [CompositeSolver - builds the actual solver from a sequence of matrix solvers; an iterator
+  passed to this solver is used for all the sub-solvers. Based on S. Bhowmick, P. Raghavan,
+  L. McInnes and B. Norris, "Faster PDE-based simulations using robust composite linear solvers",
+  Future Generation Computer Systems, Vol. 20, 2004, pp. 373-387.]
+ [CompositeSolver members - the collection of solvers to run and Solve(matrix, input, result,
+  iterator, preconditioner).]
+ [DiagonalPreconditioner - uses the inverse of the matrix diagonal as the preconditioning values;
+  Initialize(matrix) requires a non-null square matrix, Approximate(rhs, lhs) approximates the
+  solution of Ax = b.]
+ [GpBiCg - Generalized Product Bi-Conjugate Gradient solver, an alternative to BiCgStab that,
+  unlike the plain CG solver, also handles non-symmetric matrices; success again depends strongly
+  on the preconditioner. Algorithm taken from S. Fujino, "GPBiCG(m,l): A hybrid of BiCGSTAB and
+  GPBiCG methods with efficiency and robustness", Applied Numerical Mathematics, Vol. 41, 2002,
+  pp. 107-117.]
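The preconditioner contract documented above (Initialize, then Approximate) can also be exercised directly. The sketch below assumes the Math.NET Numerics 3.x DiagonalPreconditioner type and double-precision storage; it is an illustration, not part of the original text.

    // Sketch only: the documented preconditioner life-cycle, shown on the diagonal preconditioner.
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class PreconditionerExample
    {
        static void Main()
        {
            var A = SparseMatrix.OfArray(new double[,] { { 4.0, 1.0 }, { 1.0, 3.0 } });
            var rhs = DenseVector.OfArray(new[] { 1.0, 2.0 });
            var lhs = new DenseVector(rhs.Count);

            var preconditioner = new DiagonalPreconditioner();
            preconditioner.Initialize(A);          // throws if A is null or not square
            preconditioner.Approximate(rhs, lhs);  // lhs becomes roughly diag(A)^-1 * rhs

            System.Console.WriteLine(lhs.ToString());
        }
    }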
+ [GpBiCg members - counters and properties for the number of BiCgStab and GpBiCg steps taken
+  before switching between the two algorithms, CalculateTrueResidual (residual = b - Ax), the
+  step-selection helper and Solve(matrix, input, result, iterator, preconditioner).]
+ [ILU(0) preconditioner - incomplete, level-0 LU factorization; L and U are combined in a single
+  matrix to reduce storage, with UpperTriangle()/LowerTriangle() accessors, Initialize(matrix)
+  (non-null, square) and Approximate(rhs, lhs). Algorithm taken from Yousef Saad, "Iterative
+  Methods for Sparse Linear Systems", Chapter 10, section 10.3.2, page 275.]
+ [ILUTP preconditioner - incomplete LU factorization with drop tolerance and partial pivoting,
+  after Tzu-Yi Chen (Pomona College), "ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner",
+  Lecture Notes in Computer Science, Vol. 3046, 2004, pp. 20-28 (Section 2, page 22). Tunable
+  properties, each of which invalidates the preconditioner when changed and throws on negative
+  values: FillLevel (allowed fill as a fraction of the original non-zero count, standard value
+  200), DropTolerance (absolute value below which entries are dropped, standard value 0.0001) and
+  PivotTolerance (pivoting occurs when row(i,j) > row(i,i) / PivotTolerance; 0.0 disables
+  pivoting). Debug accessors return the decomposed upper and lower triangular matrices and the
+  pivot array; internal helpers handle column swapping and descending element sorting.]
+ [MILU(0) preconditioner - a simple (modified) ILU(0) preconditioner based on Youcef Saad's
+  original Fortran code (07 January 2004); requires a square matrix with sparse compressed-row
+  storage. A flag selects the modified (recommended) or standard ILU(0) variant; the factorization
+  routine consumes CSR input, produces MSR output plus a pointer to the diagonal elements, and
+  returns 0 on success or k > 0 if a zero pivot is encountered at step k.]
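A hedged sketch of pairing one of these preconditioners with the GpBiCg solver described above. The class names GpBiCg and ILU0Preconditioner are assumed from Math.NET Numerics 3.x; the diagonal or ILUTP preconditioner could be swapped in the same way.

    // Sketch only: GpBiCg with an incomplete-LU (level 0) preconditioner.
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class PreconditionedGpBiCgExample
    {
        static void Main()
        {
            var A = SparseMatrix.OfArray(new double[,]
            {
                { 10.0, 1.0, 0.0 },
                {  2.0, 8.0, 1.0 },
                {  0.0, 1.0, 6.0 }
            });
            var b = DenseVector.OfArray(new[] { 1.0, 0.0, 1.0 });
            var x = new DenseVector(b.Count);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8));

            // ILU(0) keeps the sparsity pattern of A; it is usually a stronger
            // preconditioner than the plain diagonal one.
            var solver = new GpBiCg();
            solver.Solve(A, b, x, iterator, new ILU0Preconditioner());

            System.Console.WriteLine(x.ToString());
        }
    }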
+ [MlkBiCgStab - Multiple-Lanczos Bi-Conjugate Gradient Stabilized solver, an 'improvement' of the
+  standard BiCgStab solver. Algorithm taken from Man-chung Yeung and Tony F. Chan, "ML(k)BiCGSTAB:
+  A BiCGSTAB variant based on multiple Lanczos starting vectors", SIAM Journal of Scientific
+  Computing, Volume 21, Number 4, pp. 1263-1290. The number of starting vectors for the Krylov
+  sub-space is configurable (it must be larger than 1 and smaller than the number of variables),
+  can be reset to its default, and an orthonormal set of starting vectors can be supplied
+  explicitly; otherwise random starting vectors are generated, never more than the requested
+  maximum and never more than the number of variables. Members include CalculateTrueResidual
+  (residual = b - Ax) and Solve(matrix, input, result, iterator, preconditioner).]
+ [TFQMR - Transpose Free Quasi-Minimal Residual solver. Algorithm taken from Yousef Saad,
+  "Iterative Methods for Sparse Linear Systems", Chapter 7, section 7.4.3, page 219. Members
+  include CalculateTrueResidual, an even-number helper and Solve(matrix, input, result, iterator,
+  preconditioner).]
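All of these solvers expose the same Solve contract, so candidates can be tried behind a common interface. A brief sketch, assuming the Math.NET Numerics 3.x names BiCgStab, GpBiCg, TFQMR and IIterativeSolver&lt;T&gt;; MlkBiCgStab (with its configurable number of Lanczos starting vectors) would fit the same loop.

    // Sketch only: the shared Solve(matrix, input, result, iterator, preconditioner) contract.
    using MathNet.Numerics.LinearAlgebra.Double;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class SolverSwapExample
    {
        static void Main()
        {
            var A = SparseMatrix.OfArray(new double[,] { { 5.0, 2.0 }, { 1.0, 4.0 } });
            var b = DenseVector.OfArray(new[] { 1.0, 1.0 });

            IIterativeSolver<double>[] candidates =
            {
                new BiCgStab(),
                new GpBiCg(),
                new TFQMR()
            };

            foreach (var solver in candidates)
            {
                var x = new DenseVector(b.Count);
                var iterator = new Iterator<double>(
                    new IterationCountStopCriterion<double>(200),
                    new ResidualStopCriterion<double>(1e-8));

                solver.Solve(A, b, x, iterator, new DiagonalPreconditioner());
                System.Console.WriteLine(x.ToString());
            }
        }
    }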
+ [SparseMatrix - matrix with 3-array compressed-sparse-row (CSR) storage, intended for very large,
+  mostly-zero matrices. Exposes the non-zero count and a full set of factory methods (from a
+  storage instance, sizes, other matrices, 2-D arrays, indexed or row-major enumerables,
+  column-major arrays, column/row arrays and vectors, diagonals, constant or function-initialized
+  values, and identity). Provides lower/upper triangle extraction (inclusive and strict),
+  negation, induced infinity and Frobenius norms, add/subtract, scalar, vector, matrix and
+  transposed-matrix multiplication, pointwise multiply/divide, IsSymmetric/IsHermitian tests, and
+  the usual +, -, * operators (binary operators pick the denser operand's representation for the
+  result).]
+ [SparseVector - vector with sparse storage for very large, mostly-zero vectors (not thread safe);
+  exposes the non-zero count and factories from a storage instance, a length, other vectors,
+  enumerables and indexed enumerables, constants and init functions.]
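A small sketch of building one of these CSR-backed matrices from (row, column, value) triples, assuming the Math.NET Numerics 3.x factory SparseMatrix.OfIndexed and the NonZerosCount property.

    // Sketch only: a mostly-zero matrix built from (row, column, value) triples.
    using System;
    using System.Collections.Generic;
    using MathNet.Numerics.LinearAlgebra.Double;

    class SparseMatrixExample
    {
        static void Main()
        {
            var entries = new List<Tuple<int, int, double>>
            {
                Tuple.Create(0, 0, 4.0),
                Tuple.Create(1, 1, 5.0),
                Tuple.Create(2, 2, 3.0),
                Tuple.Create(0, 2, 1.0)
            };

            // 1000 x 1000 matrix with only four stored entries; omitted cells are zero.
            var A = SparseMatrix.OfIndexed(1000, 1000, entries);

            Console.WriteLine(A.NonZerosCount);          // 4
            Console.WriteLine(A.LowerTriangle()[0, 0]);  // 4 (the diagonal is part of the lower triangle)
        }
    }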
+ [SparseVector operations - scalar and vector add/subtract (adding a non-zero scalar produces a
+  100% filled sparse vector and is very inefficient; a dense vector is the better choice there),
+  negate, conjugate, scalar multiply, dot and conjugate dot products, the usual +, -, *, /, %
+  operators, absolute minimum/maximum index, element sum, L1, infinity and p-norms, pointwise
+  multiply, and Parse/TryParse from strings of the form 'n', 'n;n;..', '(n;n;..)' or '[n;n;...]'.]
+ [Complex Vector base class - coercion of small values to zero by threshold, conjugate/negate,
+  scalar and vector add/subtract/multiply/divide (including scalar-by-element division), pointwise
+  multiply, divide, power (scalar or vector exponent), canonical modulus and remainder, pointwise
+  Exp/Log, dot and conjugate dot products, scalar modulus/remainder, absolute minimum/maximum
+  value and index, sum, L1, L2, infinity and p-norms, and the maximum element index.]
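A brief sketch of the sparse-vector behaviour called out above, including the densification warning when a non-zero scalar is added; the SparseVector constructor and NonZerosCount property are assumed from the double-precision Math.NET Numerics 3.x types.

    // Sketch only: sparse storage stays sparse under element writes, but adding a
    // non-zero scalar fills every cell, exactly as the warning above describes.
    using MathNet.Numerics.LinearAlgebra.Double;

    class SparseVectorExample
    {
        static void Main()
        {
            var v = new SparseVector(100000);  // all cells zero, nothing stored
            v[3] = 2.5;
            v[999] = -1.0;
            System.Console.WriteLine(v.NonZerosCount);  // 2

            // Per the warning: the result has every element non-zero, so it is
            // effectively dense and a DenseVector would be the better representation.
            var filled = v.Add(0.5);
            System.Console.WriteLine(filled[0]);  // 0.5
        }
    }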
+ [Vector minimum element index and Normalize to a unit p-norm.]
+ [DenseMatrix - matrix with dense storage in a one-dimensional column-major array. Exposes the raw
+  data array, a constructor that binds directly to a caller-supplied column-major array (no copy,
+  so changes to the array and the matrix affect each other), and factories from a storage
+  instance, sizes, other matrices, 2-D arrays, indexed or column-major enumerables, column/row
+  arrays and vectors, diagonals, constants, init functions, identity and random distributions.
+  Provides induced L1, induced infinity and entry-wise Frobenius norms, negation, conjugation,
+  scalar and matrix add/subtract, scalar, vector, matrix, transpose and conjugate-transpose
+  multiplication, division by a scalar, pointwise multiply/divide/power, and the trace.]
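The column-major, zero-copy binding described above is easy to get backwards, so a short sketch may help; it assumes the DenseMatrix(rows, columns, array) constructor documented here, as named in Math.NET Numerics 3.x.

    // Sketch only: binding a DenseMatrix to an existing column-major array without copying.
    using MathNet.Numerics.LinearAlgebra.Double;

    class DenseMatrixExample
    {
        static void Main()
        {
            // Column-major layout of the 2x3 matrix { {1, 3, 5}, {2, 4, 6} }:
            // column 0 = (1, 2), column 1 = (3, 4), column 2 = (5, 6).
            var data = new[] { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
            var m = new DenseMatrix(2, 3, data);

            System.Console.WriteLine(m[0, 1]);  // 3

            data[0] = 99.0;                     // the matrix sees the change: no copy was made
            System.Console.WriteLine(m[0, 0]);  // 99
        }
    }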
+ [DenseMatrix operators and predicates - trace (square only), +, -, * operators for matrices,
+  scalars and vectors (binary operators pick the denser operand's representation), and
+  IsSymmetric/IsHermitian tests.]
+ [DenseVector - vector with dense storage; exposes its length and raw data array, a constructor
+  that binds directly to a caller-supplied array (no copy, changes to the array and the vector
+  affect each other), factories from a storage instance, a length, other vectors, arrays,
+  enumerables, indexed enumerables, constants, init functions and random distributions, and
+  conversions that expose the internal array or bind a vector directly to a provided array. It
+  provides scalar and vector add/subtract, negate, conjugate, scalar multiply, dot and conjugate
+  dot products, the usual operators, absolute minimum/maximum index, sum, L1, L2, infinity and
+  p-norms, pointwise divide and power, and Parse/TryParse for Complex32 vectors from strings of
+  the form 'n', 'n;n;..', '(n;n;..)' or '[n;n;...]'.]
+ [DiagonalMatrix - matrix type for (possibly non-square) diagonal matrices; the diagonal always
+  starts at element (0,0) and setting an off-diagonal entry throws unless the value is 0.0 or NaN,
+  which leaves the matrix unchanged. Factories mirror the other matrix types (storage instance,
+  sizes, a constant or directly bound diagonal array, other diagonal matrices or 2-D arrays,
+  indexed or plain enumerables, init functions, identity, random diagonals). Provides negation,
+  conjugation, add/subtract, scalar multiply, and matrix-vector, matrix-matrix, transpose and
+  conjugate-transpose multiplication.]
+ The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
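Each factorization class described above (Cholesky, LU, QR, EVD, SVD) computes and caches its decomposition in the constructor and then exposes Solve for both AX = B and Ax = b. The sketch below shows the usual calling pattern through the Matrix<T> helper methods, using double precision for brevity; it is an illustration of the pattern, not the library's shipped example.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class FactorizationSketch
    {
        public static void Demo()
        {
            // Symmetric positive definite, so the Cholesky factorization A = L*L' exists.
            var A = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 1.0 },
                { 1.0, 3.0 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

            var x1 = A.Cholesky().Solve(b);   // factor once, then solve
            var x2 = A.LU().Solve(b);         // P*A = L*U
            var x3 = A.QR().Solve(b);         // A = Q*R (Householder)

            Console.WriteLine(x1);
            Console.WriteLine((x1 - x2).L2Norm());   // ~0: all three agree
            Console.WriteLine((x1 - x3).L2Norm());
        }
    }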
+ + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. 
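The SVD members documented above (singular values, effective rank, condition number, least-squares Solve) are typically reached through Svd() on the matrix. A hedged sketch follows, under the assumption that the 3.x property names S, Rank and ConditionNumber apply to the referenced build.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class SvdSketch
    {
        public static void Demo()
        {
            var M = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 1.0, 2.0 },
                { 3.0, 4.0 },
                { 5.0, 6.0 }
            });

            var svd = M.Svd(true);                   // true: also compute U and VT
            Console.WriteLine(svd.S);                // singular values, in descending order
            Console.WriteLine(svd.Rank);             // number of non-negligible singular values
            Console.WriteLine(svd.ConditionNumber);  // max(S) / min(S)

            // Least-squares solution of the overdetermined system M x = b.
            var b = Vector<double>.Build.Dense(new[] { 1.0, 0.0, 1.0 });
            Console.WriteLine(svd.Solve(b));
        }
    }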
+ + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
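For the eigenvalue decomposition described above, Evd() computes A = V*D*V^-1 at construction time, and the identity A*V = V*D from the remarks is easy to verify numerically. EigenValues, EigenVectors and D are the usual 3.x property names; treat them as assumptions to confirm against the package in use.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class EvdSketch
    {
        public static void Demo()
        {
            // Symmetric input, so the eigenvalues are real and V is orthogonal.
            var A = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 2.0, 1.0 },
                { 1.0, 2.0 }
            });

            var evd = A.Evd();
            Console.WriteLine(evd.EigenValues);    // 1 and 3 for this matrix
            Console.WriteLine(evd.EigenVectors);   // columns are the eigenvectors
            Console.WriteLine(evd.Determinant);    // product of the eigenvalues

            // A*V should equal V*D up to rounding, as stated in the remarks.
            var residual = (A * evd.EigenVectors - evd.EigenVectors * evd.D).FrobeniusNorm();
            Console.WriteLine(residual);
        }
    }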
+ + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. 
+ If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
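A short sketch of the Complex32 matrix operations documented in this block (conjugate transpose, hermitian test, pointwise products, norms). Complex32 is MathNet's single-precision complex value type; the method names below follow the 3.x generic matrix API and should be checked against the referenced package.

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    static class Complex32MatrixSketch
    {
        public static void Demo()
        {
            var A = Matrix<Complex32>.Build.DenseOfArray(new[,]
            {
                { new Complex32(1, 1), new Complex32(0, 2) },
                { new Complex32(2, -1), new Complex32(3, 0) }
            });

            var AH = A.ConjugateTranspose();       // conjugate (hermitian) transpose
            var G = AH * A;                        // A^H * A is hermitian (up to rounding)
            Console.WriteLine(G.IsHermitian());

            Console.WriteLine(A.PointwiseMultiply(A));  // element-wise product
            Console.WriteLine(A.FrobeniusNorm());       // entry-wise Frobenius norm
            Console.WriteLine(A.L1Norm());              // maximum absolute column sum
        }
    }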
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
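The Solve(matrix, input, result, iterator, preconditioner) signature documented above is the whole calling contract of the iterative solvers. Below is a hedged sketch of a typical invocation using the double-precision solver types; the stop-criterion and preconditioner class names are the usual MathNet.Numerics 3.x ones and are assumptions to verify.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class BiCgStabSketch
    {
        // Solves A x = b with BiCGStab; iteration stops on whichever criterion fires first.
        public static Vector<double> Solve(Matrix<double> A, Vector<double> b)
        {
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(1000),   // hard cap on iterations
                new ResidualStopCriterion<double>(1e-10));       // target residual ||b - A*x||

            var x = Vector<double>.Build.Dense(b.Count);         // result vector, filled in place
            new BiCgStab().Solve(A, b, x, iterator, new DiagonalPreconditioner());
            return x;
        }
    }

As the remarks above point out, the preconditioner choice matters a great deal; the diagonal preconditioner is only a cheap default.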
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris

+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
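GPBiCG is called exactly like BiCGStab, so the simplest way to try it is the SolveIterative helper on the matrix; whether that exact overload exists depends on the MathNet.Numerics build, so treat this as a sketch rather than a definitive call.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class GpBiCgSketch
    {
        public static Vector<double> Solve(Matrix<double> A, Vector<double> b)
        {
            // Same contract as BiCgStab; only the solver instance changes. The number of
            // BiCGStab/GPBiCG steps taken before switching is configurable on the solver.
            return A.SolveIterative(
                b,
                new GpBiCg(),
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));
        }
    }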
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
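The ILU(0) preconditioner is handed to one of the iterative solvers in place of the diagonal or unit preconditioner; it keeps the sparsity pattern of the input (level 0 fill). The class name ILU0Preconditioner is an assumption based on the 3.x double-precision solver namespace.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class Ilu0Sketch
    {
        public static Vector<double> Solve(Matrix<double> A, Vector<double> b)
        {
            // A should be square and, in practice, sparse; the combined L/U factor
            // reuses A's sparsity pattern, so no extra fill-in is stored.
            var preconditioner = new ILU0Preconditioner();

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8));

            var x = Vector<double>.Build.Dense(b.Count);
            new BiCgStab().Solve(A, b, x, iterator, preconditioner);
            return x;
        }
    }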
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
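The fill level, drop tolerance and pivot tolerance described above are the three constructor arguments of the ILUTP preconditioner (in that order), and changing them after initialization requires re-initializing the preconditioner. The class name ILUTPPreconditioner and the constructor shape are assumptions to check against the referenced package; the parameter values below are purely illustrative.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class IlutpSketch
    {
        public static Vector<double> Solve(Matrix<double> A, Vector<double> b)
        {
            // fill level: allowed fill relative to the original non-zero count,
            // drop tolerance: entries with a smaller absolute value are dropped,
            // pivot tolerance: 0.0 disables partial pivoting entirely.
            var preconditioner = new ILUTPPreconditioner(10.0, 1e-4, 0.0);

            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8));

            var x = Vector<double>.Build.Dense(b.Count);
            new BiCgStab().Solve(A, b, x, iterator, preconditioner);
            return x;
        }
    }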
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors,
+ Man-chung Yeung and Tony F. Chan,
+ SIAM Journal on Scientific Computing,
+ Volume 21, Number 4, pp. 1263-1290.
+ The example code below provides an indication of the possible use of the solver.
+
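The inline example from the original documentation is not reproduced in this file; the following is a rough sketch of a typical call, assuming the MathNet.Numerics 3.x solver API with the class names MlkBiCgStab and MILU0Preconditioner in MathNet.Numerics.LinearAlgebra.Double.Solvers (names may differ between versions).

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    // Sparse, diagonally dominant test system A x = b.
    int n = 1000;
    Matrix<double> A = Matrix<double>.Build.Sparse(n, n, (i, j) =>
        i == j ? 4.0 : (Math.Abs(i - j) == 1 ? -1.0 : 0.0));
    Vector<double> b = Vector<double>.Build.Dense(n, 1.0);
    Vector<double> x = Vector<double>.Build.Dense(n);

    // Stop after at most 1000 iterations, or once the residual falls below 1e-10.
    var iterator = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10));

    var solver = new MlkBiCgStab();   // uses the default number of starting vectors
    solver.Solve(A, b, x, iterator, new MILU0Preconditioner());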
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative Methods for Sparse Linear Systems,
+ Yousef Saad.
+ The algorithm is described in Chapter 7, Section 7.4.3, page 219.
+ The example code below provides an indication of the possible use of the solver.
+
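Again, the original inline example is not preserved here; the sketch below shows the same call shape with the TFQMR solver, assuming a UnitPreconditioner&lt;double&gt; (identity, i.e. effectively unpreconditioned) is available in MathNet.Numerics.LinearAlgebra.Solvers.

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    int n = 500;
    Matrix<double> A = Matrix<double>.Build.Sparse(n, n, (i, j) =>
        i == j ? 4.0 : (Math.Abs(i - j) == 1 ? -1.0 : 0.0));
    Vector<double> b = Vector<double>.Build.Dense(n, 1.0);
    Vector<double> x = Vector<double>.Build.Dense(n);

    var iterator = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-8));

    new TFQMR().Solve(A, b, x, iterator, new UnitPreconditioner<double>());
    Console.WriteLine(iterator.Status);   // e.g. Converged or StoppedWithoutConvergence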
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
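As a quick illustration of the triangle and norm members documented above, the sketch below builds a small CSR-backed matrix through the standard builder (MathNet.Numerics 3.x API assumed).

    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.SparseOfArray(new double[,]
    {
        { 4, -1,  0 },
        { 2,  5, -1 },
        { 0,  3,  6 },
    });

    var lower       = m.LowerTriangle();           // keeps the diagonal
    var strictUpper = m.StrictlyUpperTriangle();   // drops the diagonal

    double infNorm  = m.InfinityNorm();    // maximum absolute row sum, 9 here
    double frobNorm = m.FrobeniusNorm();   // sqrt of the sum of squared entries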
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
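A small sketch of the sparse vector members documented above, using the generic builder for a complex-valued vector (MathNet.Numerics 3.x API assumed); only a handful of elements are non-zero, which is where sparse storage pays off.

    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;

    var v = Vector<Complex>.Build.Sparse(100000);
    v[3]     = new Complex(1.0, 2.0);
    v[50000] = new Complex(0.0, -1.0);

    var w = Vector<Complex>.Build.Sparse(100000);
    w[3] = new Complex(2.0, 0.0);

    Complex dot  = v.DotProduct(w);            // sum of v[i] * w[i]
    Complex cdot = v.ConjugateDotProduct(w);   // sum of conj(v[i]) * w[i]
    double  l1   = v.L1Norm();                 // sum of |v[i]| (Manhattan norm)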
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. 
+ + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
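The dense overloads above cover most day-to-day construction needs; a short sketch of a few of them, assuming the MathNet.Numerics 3.x builder obtained through Matrix&lt;double&gt;.Build:

    using MathNet.Numerics.LinearAlgebra;

    var B = Matrix<double>.Build;

    var zeros    = B.Dense(3, 4);                       // 3x4, all cells zero
    var fromInit = B.Dense(3, 3, (i, j) => i + 10.0 * j); // init function per cell
    var identity = B.DenseIdentity(3);                  // one-diagonal identity
    var fromArr  = B.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });

    // Binding directly to a column-major array: cheap, but the array and the
    // matrix now share storage, so writes to one are visible through the other.
    var colMajor = new double[] { 1, 3, 2, 4 };          // columns (1,3) and (2,4)
    var bound    = B.Dense(2, 2, colMajor);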
+ + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. 
+ A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. 
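For very large, mostly-zero matrices the indexed-enumerable overload described above is usually the most convenient entry point; a sketch, assuming the Tuple-based overload of MathNet.Numerics 3.x:

    using System;
    using System.Collections.Generic;
    using MathNet.Numerics.LinearAlgebra;

    // Only the listed entries are stored; every omitted cell is zero.
    var entries = new List<Tuple<int, int, double>>
    {
        Tuple.Create(0, 0, 4.0),
        Tuple.Create(0, 1, -1.0),
        Tuple.Create(9999, 9999, 4.0),
    };
    Matrix<double> S = Matrix<double>.Build.SparseOfIndexed(10000, 10000, entries);
    Console.WriteLine(S[9999, 9999]);   // 4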
+ + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. 
+ + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. 
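A few of the vector-builder overloads described above, side by side (MathNet.Numerics 3.x assumed):

    using MathNet.Numerics.LinearAlgebra;

    var V = Vector<double>.Build;

    var zeros  = V.Dense(5);                    // length 5, all cells zero
    var filled = V.Dense(5, 1.5);               // every element set to 1.5
    var ramp   = V.Dense(5, i => i * 0.5);      // element i initialized to i/2
    var bound  = V.Dense(new[] { 1.0, 2.0 });   // binds directly to the array (no copy)

    var sparse = V.Sparse(1000000);             // large, mostly-zero vector
    sparse[123456] = 3.0;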
+ + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. 
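The 2D-array-of-matrices overload above is handy for assembling block matrices; a sketch, with the method name DenseOfMatrixArray assumed from the MathNet.Numerics 3.x builder:

    using MathNet.Numerics.LinearAlgebra;

    var B = Matrix<double>.Build;

    // 2x2 grid of blocks: the result is a 5x5 matrix with identity blocks on the
    // diagonal, zeros in the upper-right block and ones in the lower-left block.
    var blocks = new Matrix<double>[,]
    {
        { B.DenseIdentity(2), B.Dense(2, 3, 0.0) },
        { B.Dense(3, 2, 1.0), B.DenseIdentity(3) },
    };
    Matrix<double> big = B.DenseOfMatrixArray(blocks);   // 5 x 5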
+ + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. 
+ This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
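A hedged sketch of the sparse and diagonal builders described above, assuming the same Math.NET Numerics builder API; the (row, column, value) triples and sizes are invented for illustration.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class SparseBuildSketch
    {
        static void Demo()
        {
            var M = Matrix<double>.Build;

            // Empty 1000 x 1000 sparse matrix; only non-zero entries are stored.
            var s = M.Sparse(1000, 1000);

            // From an indexed enumerable of (row, column, value) triples;
            // omitted positions default to zero.
            var triples = new[]
            {
                Tuple.Create(0, 0, 4.0),
                Tuple.Create(1, 2, -1.5),
                Tuple.Create(999, 999, 2.0)
            };
            var fromTriples = M.SparseOfIndexed(1000, 1000, triples);

            // Diagonal matrix bound directly to a raw array:
            // changes to `d` and to `bound` affect each other.
            var d = new[] { 1.0, 2.0, 3.0 };
            var bound = M.Diagonal(d);
        }
    }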
+ + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. 
+ Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + Supported data types are double, single, , and . + + + + Gets the lower triangular form of the Cholesky matrix. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + Supported data types are double, single, , and . 
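Before the eigenvalue-decomposition members below, a short sketch of the vector builders and the Cholesky usage documented just above; the symmetric positive definite matrix is an arbitrary example, not taken from the library.

    using MathNet.Numerics.LinearAlgebra;

    static class CholeskySketch
    {
        static void Demo()
        {
            // A symmetric positive definite matrix A and right-hand side b.
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1, 0 },
                { 1, 3, 1 },
                { 0, 1, 2 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

            // The factorization is computed here; a non-SPD matrix throws.
            var chol = A.Cholesky();

            Matrix<double> L = chol.Factor;   // lower triangular, A = L * L'
            double det = chol.Determinant;    // determinant of A
            Vector<double> x = chol.Solve(b); // solves A x = b
        }
    }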
+ + + + Gets or sets a value indicating whether matrix is symmetric or not + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Gets or sets the eigen values (λ) of matrix in ascending value. + + + + + Gets or sets eigenvectors. + + + + + Gets or sets the block diagonal eigenvalue matrix. + + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + Supported data types are double, single, , and . + + + + Classes that solves a system of linear equations, AX = B. + + Supported data types are double, single, , and . + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, Ax = b + + The right hand side vector, b. + The left hand side Vector, x. + + + + Solves a system of linear equations, Ax = b. + + The right hand side vector, b. + The left hand side Matrix>, x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + Supported data types are double, single, , and . + + + + Gets the lower triangular factor. + + + + + Gets the upper triangular factor. + + + + + Gets the permutation applied to LU factorization. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. 
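A minimal sketch of the LU members listed above (factors, pivot permutation, determinant, solve, and the LU-based inverse); the 3x3 system is illustrative only.

    using MathNet.Numerics.LinearAlgebra;

    static class LuSketch
    {
        static void Demo()
        {
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 2, 1, 1 },
                { 4, 3, 3 },
                { 8, 7, 9 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

            var lu = A.LU();               // P * A = L * U, with pivoting

            var L = lu.L;                  // lower triangular factor
            var U = lu.U;                  // upper triangular factor
            var p = lu.P;                  // pivot permutation
            double det = lu.Determinant;   // determinant from the factorization

            var x = lu.Solve(b);           // solves A x = b
            var inv = A.Inverse();         // inverse, calculated via LU as noted above
        }
    }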
+ + + + The type of QR factorization go perform. + + + + + Compute the full QR factorization of a matrix. + + + + + Compute the thin QR factorization of a matrix. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + Supported data types are double, single, , and . + + + + Gets or sets orthogonal Q matrix + + + + + Gets the upper triangular factor R. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + Supported data types are double, single, , and . + + + Indicating whether U and VT matrices have been computed during SVD factorization. + + + + Gets the singular values (Σ) of matrix in ascending value. + + + + + Gets the left singular vectors (U - m-by-m unitary matrix) + + + + + Gets the transpose right singular vectors (transpose of V, an n-by-n unitary matrix) + + + + + Returns the singular values as a diagonal . + + The singular values as a diagonal . + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. 
+ The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + Supported data types are double, single, , and . + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + + + The value of 1.0. + + + + + The value of 0.0. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar denominator to use. + The matrix to store the result of the division. 
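The QR and SVD summaries a little earlier (before these Matrix base-class arithmetic entries) map onto calls like the following sketch; the overdetermined data set is invented for illustration.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Factorization;

    static class QrSvdSketch
    {
        static void Demo()
        {
            // Overdetermined system: 4 equations, 2 unknowns.
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 1 }, { 1, 2 }, { 1, 3 }, { 1, 4 }
            });
            var b = Vector<double>.Build.Dense(new[] { 6.0, 5.0, 7.0, 10.0 });

            // Thin QR: Q is 4x2, R is 2x2; Solve gives the least-squares solution.
            var qr = A.QR(QRMethod.Thin);
            var xQr = qr.Solve(b);

            // SVD: A = U * S * V'; singular values, rank and condition number.
            var svd = A.Svd();
            var sigma = svd.S;               // the singular values Σ
            int rank = svd.Rank;
            double cond = svd.ConditionNumber;
            var xSvd = svd.Solve(b);         // also a least-squares solution
        }
    }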
+ + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar numerator to use. + The matrix to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent matrix and store the result into the result matrix. + + The exponent matrix to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Adds a scalar to each element of the matrix. + + The scalar to add. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds a scalar to each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix. + + The scalar to subtract. 
+ A new matrix containing the subtraction of this matrix and the scalar. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts each element of the matrix from a scalar. + + The scalar to subtract from. + A new matrix containing the subtraction of the scalar and this matrix. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of this matrix with a scalar. + + The scalar to multiply with. + The result of the multiplication. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides each element of this matrix with a scalar. + + The scalar to divide with. + The result of the division. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides a scalar by each element of the matrix. + + The scalar to divide. + The result of the division. + + + + Divides a scalar by each element of the matrix and places results into the result matrix. + + The scalar to divide. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.ColumnCount != rightSide.Count. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.RowCount. + If this.ColumnCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ). + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.Rows. + If the result matrix's dimensions are not the this.Rows x other.Columns. + + + + Multiplies this matrix with another matrix and returns the result. 
+ + The matrix to multiply with. + If this.Columns != other.Rows. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with the conjugate transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the conjugate transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the conjugate transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Raises this square matrix to a positive integer exponent and places the results into the result matrix. + + The positive integer exponent to raise the matrix to. + The result of the power. + + + + Multiplies this square matrix with another matrix and returns the result. + + The positive integer exponent to raise the matrix to. + + + + Negate each element of this matrix. + + A matrix containing the negated values. + + + + Negate each element of this matrix and place the results into the result matrix. 
+ + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. 
+ + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
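A compact sketch tying together the operator, multiplication and pointwise entries above; all values are arbitrary and the class name is made up for the example.

    using MathNet.Numerics.LinearAlgebra;

    static class PointwiseSketch
    {
        static void Demo()
        {
            var B = Matrix<double>.Build;
            var a = B.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
            var b = B.DenseOfArray(new double[,] { { 2, 2 }, { 2, 2 } });

            var sum      = a + b;                         // element-wise addition
            var scaled   = 2.0 * a;                       // scalar multiplication
            var product  = a * b;                         // ordinary matrix product
            var gram     = a.TransposeThisAndMultiply(a); // A' * A without forming A' first
            var hadamard = a.PointwiseMultiply(b);        // element-wise (Hadamard) product
            var ratio    = a.PointwiseDivide(b);          // element-wise division
            var squared  = a.PointwisePower(2.0);         // element-wise power
            var expA     = a.PointwiseExp();              // element-wise exp
            var logA     = a.PointwiseLog();              // element-wise natural log

            // "result" overloads write into an existing matrix instead of allocating.
            var result = B.Dense(2, 2);
            a.PointwiseMultiply(b, result);
        }
    }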
+ + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Calculates the rank of the matrix. + + effective numerical rank, obtained from SVD + + + + Calculates the nullity of the matrix. + + effective numerical nullity, obtained from SVD + + + Calculates the condition number of this matrix. + The condition number of the matrix. + The condition number is calculated using singular value decomposition. + + + Computes the determinant of this matrix. + The determinant of this matrix. + + + + Computes an orthonormal basis for the null space of this matrix, + also known as the kernel of the corresponding matrix transformation. 
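A sketch of the scalar diagnostics and null-space/column-space helpers documented above and immediately below, using an intentionally rank-deficient matrix (row 3 = row 1 + row 2) as an illustrative input.

    using MathNet.Numerics.LinearAlgebra;

    static class MatrixPropertiesSketch
    {
        static void Demo()
        {
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 2, 3 },
                { 4, 5, 6 },
                { 5, 7, 9 }
            });

            double trace = A.Trace();          // 1 + 5 + 9 = 15
            int rank = A.Rank();               // 2, obtained from SVD
            int nullity = A.Nullity();         // 3 - 2 = 1
            double det = A.Determinant();      // ~0, the matrix is singular
            double cond = A.ConditionNumber(); // very large for this near-singular input

            // Orthonormal bases for the null space and the column space.
            Vector<double>[] kernel = A.Kernel();
            Vector<double>[] range = A.Range();
        }
    }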
+ + + + + Computes an orthonormal basis for the column space of this matrix, + also known as the range or image of the corresponding matrix transformation. + + + + Computes the inverse of this matrix. + The inverse of this matrix. + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + If the result matrix's dimensions are not (this.Rows * lower.rows) x (this.Columns * lower.Columns). + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + Calculates the induced L1 norm of this matrix. 
+ The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + For sparse matrices, the L2 norm is computed using a dense implementation of singular value decomposition. + In a later release, it will be replaced with a sparse implementation. + + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns a string that describes the type, dimensions and shape of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes this matrix. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Matrix class. + + + + + Gets the raw matrix data storage. + + + + + Gets the number of columns. + + The number of columns. + + + + Gets the number of rows. + + The number of rows. + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + + + + Sets the value of the given element without range checking. + + + The row of the element. + + + The column of the element. 
+ + + The value to set the element to. + + + + + Sets all values to zero. + + + + + Sets all values of a row to zero. + + + + + Sets all values of a column to zero. + + + + + Sets all values for all of the chosen rows to zero. + + + + + Sets all values for all of the chosen columns to zero. + + + + + Sets all values of a sub-matrix to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Creates a clone of this instance. + + + A clone of the instance. + + + + + Copies the elements of this matrix to the given matrix. + + + The matrix to copy values into. + + + If target is . + + + If this and the target matrix do not have the same dimensions.. + + + + + Copies a row into an Vector. + + The row to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of rows. + + + + Copies a row into to the given Vector. + + The row to copy. + The Vector to copy the row into. + If the result vector is . + If is negative, + or greater than or equal to the number of rows. + If this.Columns != result.Count. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of rows. + is negative, + or greater than or equal to the number of columns. + (columnIndex + length) >= Columns. + If is not positive. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Copies a column into a new Vector>. + + The column to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of columns. + + + + Copies a column into to the given Vector. + + The column to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If this.Rows != result.Count. + + + + Copies the requested column elements into a new Vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of columns. + is negative, + or greater than or equal to the number of rows. + (rowIndex + length) >= Rows. + + If is not positive. + + + + Copies the requested column elements into the given vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. 
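The norm, element-access and row/column-copy entries above roughly correspond to the following sketch; the member names used here (At, Row, Column, RowNorms, RowSums) are assumed to match these summaries, and the values are arbitrary.

    using MathNet.Numerics.LinearAlgebra;

    static class NormsAndAccessSketch
    {
        static void Demo()
        {
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, -2 },
                { 3,  4 }
            });

            // Matrix norms.
            double l1   = A.L1Norm();        // max absolute column sum = 6
            double linf = A.InfinityNorm();  // max absolute row sum = 7
            double fro  = A.FrobeniusNorm(); // sqrt(1 + 4 + 9 + 16)
            double l2   = A.L2Norm();        // largest singular value

            // Range-checked indexer vs. unchecked At accessors.
            double x = A[1, 0];              // 3
            A.At(0, 1, -5.0);                // set without range checking

            // Copy a row / column out as independent vectors.
            Vector<double> row0 = A.Row(0);
            Vector<double> col1 = A.Column(1);

            // Per-row 2-norms and per-row value sums.
            Vector<double> rowNorms = A.RowNorms(2.0);
            Vector<double> rowSums = A.RowSums();
        }
    }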
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Returns the elements of the diagonal in a Vector. + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a new matrix and inserts the given column at the given index. + + The index of where to insert the column. + The column to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of columns. + If the size of != the number of rows. + + + + Creates a new matrix with the given column removed. + + The index of the column to remove. + A new matrix without the chosen column. + If is < zero or >= the number of columns. + + + + Copies the values of the given Vector to the specified column. + + The column to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given Vector to the specified sub-column. + + The column to copy the values to. + The row to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given array to the specified column. + + The column to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + If the size of does not + equal the number of rows of this Matrix. 
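A short sketch of the triangle, sub-matrix and column insert/remove/set operations documented above; the comments note which calls return new matrices and which mutate in place, and the 3x3 values are illustrative.

    using MathNet.Numerics.LinearAlgebra;

    static class SubmatrixSketch
    {
        static void Demo()
        {
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 2, 3 },
                { 4, 5, 6 },
                { 7, 8, 9 }
            });

            var upper = A.UpperTriangle();               // zeros below the diagonal
            var strictLower = A.StrictlyLowerTriangle(); // zeros on and above the diagonal
            var diag = A.Diagonal();                     // vector (1, 5, 9)

            // 2x2 block starting at row 1, column 1.
            var block = A.SubMatrix(1, 2, 1, 2);

            // These return new matrices; A itself is unchanged.
            var widened = A.InsertColumn(3, Vector<double>.Build.Dense(3, 0.0));
            var narrowed = A.RemoveColumn(0);

            // This one copies into A in place.
            A.SetColumn(2, new[] { -3.0, -6.0, -9.0 });
        }
    }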
+ + + + Creates a new matrix and inserts the given row at the given index. + + The index of where to insert the row. + The row to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of rows. + If the size of != the number of columns. + + + + Creates a new matrix with the given row removed. + + The index of the row to remove. + A new matrix without the chosen row. + If is < zero or >= the number of rows. + + + + Copies the values of the given Vector to the specified row. + + The row to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given Vector to the specified sub-row. + + The row to copy the values to. + The column to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given array to the specified row. + + The row to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The column to start copying to. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The number of rows to copy. Must be positive. + The column to start copying to. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The row of the sub-matrix to start copying from. + The number of rows to copy. Must be positive. + The column to start copying to. + The column of the sub-matrix to start copying from. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of the given Vector to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). 
+ For non-square matrices, the elements of are copied to + this[i,i]. + + + + Returns the transpose of this matrix. + + The transpose of this matrix. + + + + Puts the transpose of this matrix into the result matrix. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + + + + Concatenates this matrix with the given matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Concatenates this matrix with the given matrix and places the result into the result matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Diagonally stacks his matrix on top of the given matrix. The new matrix is a M-by-N matrix, + where M = this.Rows + lower.Rows and N = this.Columns + lower.Columns. + The values of off the off diagonal matrices/blocks are set to zero. + + The lower, right matrix. + If lower is . + the combined matrix + + + + + + Diagonally stacks his matrix on top of the given matrix and places the combined matrix into the result matrix. + + The lower, right matrix. + The combined matrix + If lower is . + If the result matrix is . + If the result matrix's dimensions are not (this.Rows + lower.rows) x (this.Columns + lower.Columns). + + + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Evaluates whether this matrix is conjugate symmetric. + + + + + Returns this matrix as a multidimensional array. + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + + A multidimensional containing the values of this matrix. + + + + Returns the matrix's elements as an array with the data laid out column by column (column major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
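Editor's note: Transpose, Append, Stack and DiagonalStack (documented above) compose a new matrix out of existing blocks. A minimal sketch under the same Matrix<double> builder assumption; the dimension comments are ours.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class CombineDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseIdentity(2);   // 2x2
            var b = Matrix<double>.Build.Dense(2, 3, 1.0);   // 2x3, all ones

            var at   = a.Transpose();       // 2x2 (the identity is its own transpose)
            var wide = a.Append(b);         // 2x5: b's columns appended on the right
            var tall = a.Stack(a);          // 4x2: a second copy stacked below
            var diag = a.DiagonalStack(b);  // 4x5: a in the top-left block, b bottom-right, zeros elsewhere

            Console.WriteLine(diag);
        }
    }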
+ + + Returns the matrix's elements as an array with the data laid row by row (row major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
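Editor's note: the two export orderings shown in the 3x3 examples above can be reproduced directly; this sketch just prints both layouts (Math.NET Numerics 3.x assumed).

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class LayoutDemo
    {
        static void Main()
        {
            var m = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 1, 2, 3 },
                { 4, 5, 6 },
                { 7, 8, 9 }
            });

            // Column major walks down each column first: 1, 4, 7, 2, 5, 8, 3, 6, 9
            Console.WriteLine(string.Join(", ", m.ToColumnMajorArray()));

            // Row major walks along each row first: 1, 2, 3, 4, 5, 6, 7, 8, 9
            Console.WriteLine(string.Join(", ", m.ToRowMajorArray()));

            // ToArray() gives an independent double[3,3] copy instead.
            double[,] copy = m.ToArray();
        }
    }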
+ + + Returns this matrix as array of row arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns this matrix as array of column arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns the internal multidimensional array of this matrix if, and only if, this matrix is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the matrix will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Returns the internal column by column (column major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row by row (row major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
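Editor's note: the difference between the To* and As* accessors documented above is ownership: To* always copies, As* hands back the internal buffer when the storage format matches and null otherwise. A sketch, assuming a dense column-major matrix so that AsColumnMajorArray() is non-null.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class SharingDemo
    {
        static void Main()
        {
            var m = Matrix<double>.Build.Dense(2, 2, 1.0);

            double[] copy   = m.ToColumnMajorArray();  // independent snapshot
            double[] shared = m.AsColumnMajorArray();  // internal buffer for dense storage (null for sparse)

            copy[0]   = 99.0;   // does not touch the matrix
            shared[0] = 42.0;   // writes through to m[0, 0]

            Console.WriteLine(m[0, 0]);  // 42
        }
    }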
+ + + Returns the internal row arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowArrays instead if you always need an independent array. + + + + + Returns the internal column arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnArrays instead if you always need an independent array. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix. + + The column to start enumerating over. + The number of columns to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix and their index. + + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix and their index. + + The column to start enumerating over. + The number of columns to enumerating over. + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix. + + The row to start enumerating over. + The number of rows to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix and their index. 
+ + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix and their index. + + The row to start enumerating over. + The number of rows to enumerating over. + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Applies a function to each value of this matrix and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value with its result. + The row and column indices of each value (zero-based) are passed as first arguments to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + For each row, applies a function f to each element of the row, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each row. + + + + + For each column, applies a function f to each element of the column, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each column. + + + + + Applies a function f to each row vector, threading an accumulator vector argument through the computation. 
+ Returns the resulting accumulator vector. + + + + + Applies a function f to each column vector, threading an accumulator vector argument through the computation. + Returns the resulting accumulator vector. + + + + + Reduces all row vectors by applying a function between two of them, until only a single vector is left. + + + + + Reduces all column vectors by applying a function between two of them, until only a single vector is left. + + + + + Applies a function to each value pair of two matrices and replaces the value in the result vector. + + + + + Applies a function to each value pair of two matrices and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two matrices and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two matrices of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element pairs of two matrices of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two matrices of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to add. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to add. + The right matrix to add. + The result of the addition. + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Subtracts a scalar from each element of a matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. 
+ The left matrix to subtract. + The scalar value to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Substracts each element of a matrix from a scalar. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Divides a scalar with a matrix. + + The scalar to divide. + The matrix. + The result of the division. + If is . + + + + Divides a matrix with a scalar. + + The matrix to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of the matrix of the given divisor. + + The matrix whose elements we want to compute the modulus of. + The divisor to use. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the matrix. + + The dividend we want to compute the modulus of. + The matrix whose elements we want to use as divisor. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two matrices. + + The matrix whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
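Editor's note: the enumeration, Map and operator members documented above combine naturally. A sketch assuming Math.NET Numerics 3.x, where EnumerateIndexed yields Tuple<int, int, double> triples (later versions use value tuples).

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class MapDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.Dense(3, 3, (i, j) => i + j);

            // Operators allocate a new matrix for the result.
            var b = 2.0 * a + a.Transpose();

            // Map returns a new matrix; Zeros.Include forces the function onto zero
            // entries too (relevant mostly for sparse storage, as the remarks above say).
            var squared = a.Map(x => x * x, Zeros.Include);

            // Indexed enumeration: (row, column, value) triples.
            foreach (var entry in b.EnumerateIndexed(Zeros.AllowSkip))
                Console.WriteLine("[{0},{1}] = {2}", entry.Item1, entry.Item2, entry.Item3);
        }
    }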
+ + + + Computes the sqrt of a matrix pointwise + + The input matrix + + + + + Computes the exponential of a matrix pointwise + + The input matrix + + + + + Computes the log of a matrix pointwise + + The input matrix + + + + + Computes the log10 of a matrix pointwise + + The input matrix + + + + + Computes the sin of a matrix pointwise + + The input matrix + + + + + Computes the cos of a matrix pointwise + + The input matrix + + + + + Computes the tan of a matrix pointwise + + The input matrix + + + + + Computes the asin of a matrix pointwise + + The input matrix + + + + + Computes the acos of a matrix pointwise + + The input matrix + + + + + Computes the atan of a matrix pointwise + + The input matrix + + + + + Computes the sinh of a matrix pointwise + + The input matrix + + + + + Computes the cosh of a matrix pointwise + + The input matrix + + + + + Computes the tanh of a matrix pointwise + + The input matrix + + + + + Computes the absolute value of a matrix pointwise + + The input matrix + + + + + Computes the floor of a matrix pointwise + + The input matrix + + + + + Computes the ceiling of a matrix pointwise + + The input matrix + + + + + Computes the rounded value of a matrix pointwise + + The input matrix + + + + + Computes the Cholesky decomposition for a matrix. + + The Cholesky decomposition object. + + + + Computes the LU decomposition for a matrix. + + The LU decomposition object. + + + + Computes the QR decomposition for a matrix. + + The type of QR factorization to perform. + The QR decomposition object. + + + + Computes the QR decomposition for a matrix using Modified Gram-Schmidt Orthogonalization. + + The QR decomposition object. + + + + Computes the SVD decomposition for a matrix. + + Compute the singular U and VT vectors or not. + The SVD decomposition object. + + + + Computes the EVD decomposition for a matrix. + + The EVD decomposition object. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. 
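Editor's note: the factorizations listed above (Cholesky, LU, QR, SVD, EVD) each expose Solve for the corresponding Ax = b / AX = B systems, and the matrix itself has a direct Solve that chooses a factorization internally. A hedged sketch with a small symmetric positive definite system; the numbers are arbitrary.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class FactorizationDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1 },
                { 1, 3 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

            var x1 = a.Cholesky().Solve(b);  // exploits the SPD structure
            var x2 = a.QR().Solve(b);        // also works for rectangular systems
            var x3 = a.Solve(b);             // direct solve, factorization chosen internally

            Console.WriteLine(x1 - x2);      // ~zero vector
        }
    }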
+ + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The result matrix X. + + + + Converts a matrix to single precision. + + + + + Converts a matrix to double precision. + + + + + Converts a matrix to single precision complex numbers. + + + + + Converts a matrix to double precision complex numbers. + + + + + Gets a single precision complex matrix with the real parts from the given matrix. + + + + + Gets a double precision complex matrix with the real parts from the given matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. 
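Editor's note: the SolveIterative overloads documented above take a concrete solver plus either an Iterator or a list of stop criteria. A sketch assuming the BiCgStab solver from MathNet.Numerics.LinearAlgebra.Double.Solvers and the params-style stop-criteria overload; the tolerances are arbitrary.

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    static class IterativeDemo
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4, 1, 0 },
                { 1, 4, 1 },
                { 0, 1, 4 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

            // Stop after 1000 iterations or once the residual drops below 1e-10.
            var x = a.SolveIterative(b, new BiCgStab(),
                new IterationCountStopCriterion<double>(1000),
                new ResidualStopCriterion<double>(1e-10));

            Console.WriteLine(a * x - b);  // residual, should be ~zero
        }
    }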
+ + + + + Existing data may not be all zeros, so clearing may be necessary + if not all of it will be overwritten anyway. + + + + + If existing data is assumed to be all zeros already, + clearing it may be skipped if applicable. + + + + + Allow skipping zero entries (without enforcing skipping them). + When enumerating sparse matrices this can significantly speed up operations. + + + + + Force applying the operation to all fields even if they are zero. + + + + + It is not known yet whether a matrix is symmetric or not. + + + + + A matrix is symmetric + + + + + A matrix is hermitian (conjugate symmetric). + + + + + A matrix is not symmetric + + + + + Defines an that uses a cancellation token as stop criterion. + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Stop criterion that delegates the status determination to a delegate. + + + + + Create a new instance of this criterion with a custom implementation. + + Custom implementation with the same signature and semantics as the DetermineStatus method. + + + + Determines the status of the iterative calculation by delegating it to the provided delegate. + Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + + + + Clones this criterion and its settings. + + + + + Monitors an iterative calculation for signs of divergence. + + + + + The maximum relative increase the residual may experience without triggering a divergence warning. + + + + + The number of iterations over which a residual increase should be tracked before issuing a divergence warning. + + + + + The status of the calculation + + + + + The array that holds the tracking information. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified maximum + relative increase and the specified minimum number of tracking iterations. + + The maximum relative increase that the residual may experience before a divergence warning is issued. + The minimum number of iterations over which the residual must grow before a divergence warning is issued. + + + + Gets or sets the maximum relative increase that the residual may experience before a divergence warning is issued. + + Thrown if the Maximum is set to zero or below. 
+ + + + Gets or sets the minimum number of iterations over which the residual must grow before + issuing a divergence warning. + + Thrown if the value is set to less than one. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Detect if solution is diverging + + true if diverging, otherwise false + + + + Gets required history Length + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Defines an that monitors residuals for NaN's. + + + + + The status of the calculation + + + + + The iteration number of the last iteration. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + The base interface for classes that provide stop criteria for iterative calculations. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current IIterationStopCriterion. Status is set to Status field of current object. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + is not a legal value. Status should be set in implementation. + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + To implementers: Invoking this method should not clear the user defined + property values, only the state that is used to track the progress of the + calculation. + + + + Defines the interface for classes that solve the matrix equation Ax = b in + an iterative manner. + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Defines the interface for objects that can create an iterative solver with + specific settings. 
This interface is used to pass iterative solver creation + setup information around. + + + + + Gets the type of the solver that will be created by this setup object. + + + + + Gets type of preconditioner, if any, that will be created by this setup object. + + + + + Creates the iterative solver to be used. + + + + + Creates the preconditioner to be used by default (can be overwritten). + + + + + Gets the relative speed of the solver. + + Returns a value between 0 and 1, inclusive. + + + + Gets the relative reliability of the solver. + + Returns a value between 0 and 1 inclusive. + + + + The base interface for preconditioner classes. + + + + Preconditioners are used by iterative solvers to improve the convergence + speed of the solving process. Increase in convergence speed + is related to the number of iterations necessary to get a converged solution. + So while in general the use of a preconditioner means that the iterative + solver will perform fewer iterations it does not guarantee that the actual + solution time decreases given that some preconditioners can be expensive to + setup and run. + + + Note that in general changes to the matrix will invalidate the preconditioner + if the changes occur after creating the preconditioner. + + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix on which the preconditioner is based. + + + + Approximates the solution to the matrix equation Mx = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Defines an that monitors the numbers of iteration + steps as stop criterion. + + + + + The default value for the maximum number of iterations the process is allowed + to perform. + + + + + The maximum number of iterations the calculation is allowed to perform. + + + + + The status of the calculation + + + + + Initializes a new instance of the class with the default maximum + number of iterations. + + + + + Initializes a new instance of the class with the specified maximum + number of iterations. + + The maximum number of iterations the calculation is allowed to perform. + + + + Gets or sets the maximum number of iterations the calculation is allowed to perform. + + Thrown if the Maximum is set to a negative value. + + + + Returns the maximum number of iterations to the default. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Iterative Calculation Status + + + + + An iterator that is used to check if an iterative calculation should continue or stop. + + + + + The collection that holds all the stop criteria and the flag indicating if they should be added + to the child iterators. + + + + + The status of the iterator. + + + + + Initializes a new instance of the class with the default stop criteria. 
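Editor's note: an Iterator bundles several stop criteria, tracks the calculation status, and can be Reset and reused between solves, as described above. Sketch only: the criterion and preconditioner class names are the ones documented in this file, TFQMR is assumed to be available in MathNet.Numerics.LinearAlgebra.Double.Solvers, and the thresholds are arbitrary.

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    static class IteratorDemo
    {
        static void Main()
        {
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8),
                new DivergenceStopCriterion<double>(0.1, 10));  // guard against a growing residual

            var a = Matrix<double>.Build.DenseDiagonal(3, 3, 2.0);
            var b = Vector<double>.Build.Dense(new[] { 2.0, 4.0, 6.0 });

            // UnitPreconditioner is the "do nothing" preconditioner documented later in this file.
            var x = a.SolveIterative(b, new TFQMR(), iterator, new UnitPreconditioner<double>());

            Console.WriteLine(iterator.Status);  // e.g. Converged
            iterator.Reset();                    // ready for the next solve
        }
    }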
+ + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Gets the current calculation status. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual iterators may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Indicates to the iterator that the iterative process has been cancelled. + + + Does not reset the stop-criteria. + + + + + Resets the to the pre-calculation state. + + + + + Creates a deep clone of the current iterator. + + The deep clone of the current iterator. + + + + Defines an that monitors residuals as stop criterion. + + + + + The maximum value for the residual below which the calculation is considered converged. + + + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + The status of the calculation + + + + + The number of iterations since the residuals got below the maximum. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified + maximum residual and minimum number of iterations. + + + The maximum value for the residual below which the calculation is considered converged. + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + Gets or sets the maximum value for the residual below which the calculation is considered + converged. + + Thrown if the Maximum is set to a negative value. + + + + Gets or sets the minimum number of iterations for which the residual has to be + below the maximum before the calculation is considered converged. + + Thrown if the BelowMaximumFor is set to a value less than 1. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Loads the available objects from the specified assembly. + + The assembly which will be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. 
+ The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The type in the assembly which should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The of the assembly that should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + + + + A unit preconditioner. This preconditioner does not actually do anything + it is only used when running an without + a preconditioner. + + + + + The coefficient matrix on which this preconditioner operates. + Is used to check dimensions on the different vectors that are processed. + + + + + Initializes the preconditioner and loads the internal data structures. + + + The matrix upon which the preconditioner is based. + + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + If and do not have the same size. + + + - or - + + + If the size of is different the number of rows of the coefficient matrix. + + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Evaluate the row and column at a specific data index. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. 
+ + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + + The array containing the row indices of the existing rows. Element "i" of the array gives the index of the + element in the array that is first non-zero element in a row "i". + The last value is equal to ValueCount, so that the number of non-zero entries in row "i" is always + given by RowPointers[i+i] - RowPointers[i]. This array thus has length RowCount+1. + + + + + An array containing the column indices of the non-zero values. Element "j" of the array + is the number of the column in matrix that contains the j-th value in the array. + + + + + Array that contains the non-zero elements of matrix. Values of the non-zero elements of matrix are mapped into the values + array using the row-major storage mapping described in a compressed sparse row (CSR) format. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Delete value from internal storage + + Index of value in nonZeroValues array + Row number of matrix + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Find item Index in nonZeroValues array + + Matrix row index + Matrix column index + Item index + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
+ + + + + Array that contains the indices of the non-zero values. + + + + + Array that contains the non-zero elements of the vector. + + + + + Gets the number of non-zero elements in the vector. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the vector storage format is dense. + + + + + Gets or sets the value at the given index, with range checking. + + + The index of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + The index of the element. + The requested element. + Not range-checked. + + + + Sets the element without range checking. + + The index of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + + Defines the generic class for Vector classes. + + Supported data types are double, single, , and . + + + + The zero value for type T. + + + + + The value of 1.0 for type T. + + + + + Negates vector and save result to + + Target vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. 
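Editor's note: sparse matrices and vectors store only their non-zero entries (CSR row pointers / column indices / values for matrices, index/value pairs for vectors, as described above). A sketch assuming the Tuple-based SparseOfIndexed builders of Math.NET Numerics 3.x; the sizes and values are arbitrary.

    using System;
    using System.Linq;
    using MathNet.Numerics.LinearAlgebra;

    static class SparseDemo
    {
        static void Main()
        {
            // 1000x1000 matrix with only three explicit entries.
            var m = Matrix<double>.Build.SparseOfIndexed(1000, 1000, new[]
            {
                Tuple.Create(0, 0, 1.0),
                Tuple.Create(10, 20, 2.5),
                Tuple.Create(999, 999, -3.0)
            });

            var v = Vector<double>.Build.SparseOfIndexed(1000, new[]
            {
                Tuple.Create(10, 4.0),
                Tuple.Create(999, 1.0)
            });

            // Zero-skipping enumeration touches only the stored entries.
            Console.WriteLine(m.EnumerateNonZero().Count());  // 3
            Console.WriteLine((m * v)[999]);                  // -3.0 * 1.0 = -3
        }
    }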
+ + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar denominator to use. + The vector to store the result of the division. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar numerator to use. + The vector to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Adds a scalar to each element of the vector. + + The scalar to add. + A copy of the vector with the scalar added. + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + If this vector and are not the same size. + + + + Adds another vector to this vector. + + The vector to add to this one. + A new vector containing the sum of both vectors. + If this vector and are not the same size. 
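Editor's note: the element-wise and reduction operations documented above read naturally in code. A small sketch (names ours, values arbitrary):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class VectorDemo
    {
        static void Main()
        {
            var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
            var v = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

            double dot   = u.DotProduct(v);          // 1*4 + 2*5 + 3*6 = 32
            var hadamard = u.PointwiseMultiply(v);   // [4, 10, 18]
            var ratio    = u.PointwiseDivide(v);     // [0.25, 0.4, 0.5]
            var outer    = u.OuterProduct(v);        // 3x3 matrix M[i,j] = u[i]*v[j]
            var shifted  = u.Add(10.0);              // [11, 12, 13]

            Console.WriteLine(dot);
            Console.WriteLine(outer);
        }
    }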
+ + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Subtracts a scalar from each element of the vector. + + The scalar to subtract. + A new vector containing the subtraction of this vector and the scalar. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Subtracts each element of the vector from a scalar. + + The scalar to subtract from. + A new vector containing the subtraction of the scalar and this vector. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Returns a negated vector. + + The negated vector. + Added as an alternative to the unary negation operator. + + + + Negates vector and save result to + + Target vector + + + + Subtracts another vector from this vector. + + The vector to subtract from this one. + A new vector containing the subtraction of the the two vectors. + If this vector and are not the same size. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Return vector with complex conjugate values of the source vector + + Conjugated vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector. + + The scalar to multiply. + A new vector that is the multiplication of the vector and the scalar. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + If this vector and are not the same size. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + If is not of the same size. + + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + If is not of the same size. + If is . + + + + + Divides each element of the vector by a scalar. + + The scalar to divide with. + A new vector that is the division of the vector and the scalar. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar to divide with. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Divides a scalar by each element of the vector. + + The scalar to divide. + A new vector that is the division of the vector and the scalar. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. 
+ A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector containing the result. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector. + + The vector to pointwise multiply with this one. + A new vector which is the pointwise multiplication of the two vectors. + If this vector and are not the same size. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector. + + The pointwise denominator vector to use. + A new vector which is the pointwise division of the two vectors. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise division. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The matrix to store the result into. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + The vector to store the result into. + If this vector and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. 
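Editor's note: the distinction drawn above between the canonical modulus (result has the sign of the divisor) and the remainder (result has the sign of the dividend, like the C# % operator) is easy to get wrong. The sketch below reproduces both conventions with Map so it does not depend on the exact method names, which vary between versions.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class ModulusDemo
    {
        static void Main()
        {
            var v = Vector<double>.Build.Dense(new[] { 5.0, -5.0, 3.0, -3.0 });
            const double divisor = 4.0;

            // Remainder (% operator): keeps the sign of the dividend.
            var remainder = v.Map(x => x % divisor);                       // [1, -1, 3, -3]

            // Canonical modulus: keeps the sign of the divisor.
            var modulus = v.Map(x => ((x % divisor) + divisor) % divisor); // [1, 3, 3, 1]

            Console.WriteLine(remainder);
            Console.WriteLine(modulus);
        }
    }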
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise modulus. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise remainder. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Helper function to apply a unary function to a vector. The function + f modifies the vector given to it in place. Before its + called, a copy of the 'this' vector with the same dimension is + first created, then passed to f. The copy is returned as the result + + Function which takes a vector, modifies it in place and returns void + New instance of vector which is the result + + + + Helper function to apply a unary function which modifies a vector + in place. + + Function which takes a vector, modifies it in place and returns void + The vector where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes a scalar and + a vector and modifies the latter in place. A copy of the "this" + vector is therefore first made and then passed to f together with + the scalar argument. The copy is then returned as the result + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + The resulting vector + + + + Helper function to apply a binary function which takes a scalar and + a vector, modifies the latter in place and returns void. + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the latter in place. A copy of the "this" vector is + first made and then passed to f together with the other vector. The + copy is then returned as the result + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the second one in place + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The vector to store the result. 
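To make the vector arithmetic members summarized above concrete, here is a minimal sketch. It assumes the Math.NET Numerics 3.x API that this documentation file describes (in particular the Vector<double>.Build factory); it is an illustration, not code from this changeset.

using System;
using MathNet.Numerics.LinearAlgebra;

class VectorArithmeticDemo
{
    static void Main()
    {
        // Dense vectors built from arrays.
        Vector<double> u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
        Vector<double> v = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

        Vector<double> sum = u + v;                        // element-wise addition
        Vector<double> scaled = 2.5 * u;                   // scalar multiplication
        Vector<double> hadamard = u.PointwiseMultiply(v);  // element-wise product
        double dot = u.DotProduct(v);                      // 1*4 + 2*5 + 3*6 = 32

        // Overloads that take an explicit result vector reuse existing storage
        // instead of allocating a new vector.
        Vector<double> diff = Vector<double>.Build.Dense(u.Count);
        u.Subtract(v, diff);                               // diff = u - v

        Console.WriteLine($"sum={sum} scaled={scaled} hadamard={hadamard} dot={dot} diff={diff}");
    }
}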
+    <!-- Vector<T> pointwise function members (Abs, Acos, Asin, Atan, Atan2, Ceiling,
+         Cos, Cosh, Floor, Log10, Round, Sign, Sin, Sinh, Sqrt, Tan, Tanh),
+         OuterProduct, pointwise minimum/maximum and absolute minimum/maximum against
+         a scalar or another vector, the L1, L2, infinity and general p-norms,
+         Normalize(p), and the minimum, maximum, absolute minimum and absolute maximum
+         values together with their indices. -->
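The norm and pointwise-function members read naturally in code. A small sketch, again assuming the Math.NET Numerics 3.x vector API (PointwiseAbs, PointwiseMaximum, the norm methods) rather than anything defined in this repository:

using System;
using MathNet.Numerics.LinearAlgebra;

class VectorNormsDemo
{
    static void Main()
    {
        Vector<double> v = Vector<double>.Build.DenseOfArray(new[] { -3.0, 0.0, 4.0 });

        double l1 = v.L1Norm();                 // 7 (sum of absolute values)
        double l2 = v.L2Norm();                 // 5 (Euclidean length)
        double linf = v.InfinityNorm();         // 4 (largest absolute value)
        Vector<double> unit = v.Normalize(2);   // v scaled to unit Euclidean length

        Vector<double> abs = v.PointwiseAbs();            // |v| element by element
        Vector<double> clipped = v.PointwiseMaximum(0.0); // negative entries become 0

        Console.WriteLine($"L1={l1} L2={l2} Linf={linf} argmaxAbs={v.AbsoluteMaximumIndex()}");
        Console.WriteLine($"unit={unit} abs={abs} clipped={clipped}");
    }
}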
+    <!-- Vector<T> aggregation, equality and storage members: Sum and SumMagnitudes,
+         Equals and GetHashCode, the enumerators, the ToString/ToVectorString/
+         ToTypeString formatting overloads, the protected constructor, the Storage and
+         Count properties, the range-checked indexer and the unchecked At accessors,
+         Clear together with subvector, threshold and predicate clearing, Clone,
+         SetValues, CopyTo, SubVector and subvector copying, ToArray and AsArray,
+         ToColumnMatrix and ToRowMatrix, and the Enumerate, EnumerateIndexed,
+         EnumerateNonZero and EnumerateNonZeroIndexed iterators. -->
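Indexing, subvector copies and the array/matrix conversions look like this in practice; the sketch assumes the stock Math.NET Numerics 3.x API and is illustrative only:

using System;
using MathNet.Numerics.LinearAlgebra;

class VectorStorageDemo
{
    static void Main()
    {
        Vector<double> v = Vector<double>.Build.Dense(5, i => i * i);  // 0, 1, 4, 9, 16

        double third = v[2];                         // range-checked indexer
        v[4] = 25.0;                                 // set a single entry

        Vector<double> middle = v.SubVector(1, 3);   // copy of entries 1..3
        double[] raw = v.ToArray();                  // independent array copy
        Matrix<double> column = v.ToColumnMatrix();  // new 5x1 matrix built from the vector

        Console.WriteLine($"v={v} third={third} middle={middle} raw.Length={raw.Length}");
        Console.WriteLine($"column is {column.RowCount}x{column.ColumnCount}");
    }
}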
+    <!-- Vector<T> functional and operator members: the Map, MapIndexed, MapInplace,
+         Map2 and Fold2 family (zero entries may be skipped on sparse storage unless
+         forced), Find and Find2, Exists and Exists2, ForAll and ForAll2, and the
+         static operator overloads for unary plus and negation, vector and scalar
+         addition and subtraction, scalar multiplication, the dot product, scalar and
+         pointwise division, and the remainder (%) operators. -->
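The Map/Exists/ForAll members and the operator overloads combine as below; a sketch under the same assumption that the Math.NET Numerics 3.x vector API is in play, in particular that * between two vectors is the dot product as the operator documentation above states:

using System;
using MathNet.Numerics.LinearAlgebra;

class VectorMapDemo
{
    static void Main()
    {
        Vector<double> v = Vector<double>.Build.DenseOfArray(new[] { 1.0, -2.0, 3.0 });

        // Map returns a new vector; MapInplace rewrites the existing storage.
        Vector<double> squared = v.Map(x => x * x);
        v.MapInplace(x => Math.Abs(x));

        bool anyLarge = v.Exists(x => x > 2.5);     // true, because of the 3.0 entry
        bool allPositive = v.ForAll(x => x > 0.0);  // true after taking absolute values

        // Operator overloads mirror the instance methods.
        Vector<double> w = 2.0 * v - squared;
        double dot = v * squared;                   // * between two vectors is the dot product

        Console.WriteLine($"squared={squared} v={v} anyLarge={anyLarge} allPositive={allPositive}");
        Console.WriteLine($"w={w} dot={dot}");
    }
}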
+    <!-- Static pointwise helpers (Sqrt, Exp, Log, Log10, the trigonometric and
+         hyperbolic functions, Abs, Floor, Ceiling, Round), conversions between single,
+         double and complex precision plus the real and imaginary part accessors, and
+         the least-squares regression routines: fitting X*β to Y with a selectable
+         direct method, via the Cholesky-factored normal equations, via an orthogonal
+         (QR) decomposition (more numerically stable but slower), and via the SVD
+         (most robust when ill-conditioned), each for vector and matrix responses and
+         for predictor-array samples with an optional intercept; simple line fitting
+         returning the (intercept, slope) pair; and weighted and locally-weighted
+         regression based on the normal equations. -->
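As a worked example of the regression routines, the sketch below fits a straight line two ways: with the simple line fit that returns an (intercept, slope) tuple, and with a general least-squares solve through an orthogonal (QR) decomposition. The class and method names (Fit.Line, MultipleRegression.QR) are assumed from the Math.NET Numerics 3.x package this documentation describes; treat the sketch as illustrative, not as code from this changeset.

using System;
using MathNet.Numerics;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearRegression;

class RegressionDemo
{
    static void Main()
    {
        // Simple line fit y = a + b*x: Fit.Line returns the (intercept, slope) pair.
        double[] x = { 0.0, 1.0, 2.0, 3.0 };
        double[] y = { 1.1, 2.9, 5.2, 6.8 };
        Tuple<double, double> line = Fit.Line(x, y);
        Console.WriteLine($"intercept={line.Item1:F3} slope={line.Item2:F3}");

        // General least squares X*b ~= Y via an orthogonal (QR) decomposition,
        // slower than the normal equations but more numerically stable.
        Matrix<double> X = Matrix<double>.Build.DenseOfRowArrays(
            new[] { 1.0, 0.0 },
            new[] { 1.0, 1.0 },
            new[] { 1.0, 2.0 },
            new[] { 1.0, 3.0 });
        Vector<double> Y = Vector<double>.Build.DenseOfArray(y);
        Vector<double> beta = MultipleRegression.QR(X, Y);
        Console.WriteLine($"beta = {beta}");
    }
}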
+    <!-- ODE solver documentation: the first- through fourth-order Adams-Bashforth
+         methods and the second- and fourth-order Runge-Kutta methods for scalar ODEs
+         and ODE systems, each taking the initial value, start and end time, the size
+         of the output array and the ODE model function. Also the Permutation class
+         (the index array, the element count, the forward map and its inverse,
+         construction from and to inversion sequences, and permutation validation) and
+         the start of the Precision utilities: background references on floating-point
+         arithmetic and machine epsilon, plus the compare overloads using an absolute
+         accuracy, a number of decimal places, a relative accuracy or a maximum number
+         of units in the last place (ULPs). -->
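A short sketch of the fourth-order Runge-Kutta solver described above. It assumes the RungeKutta.FourthOrder signature (initial value, start time, end time, size of output array, model) and that the model delegate is called as f(t, y); both are assumptions about the Math.NET Numerics 3.x package, not code from this repository.

using System;
using MathNet.Numerics.OdeSolvers;

class OdeDemo
{
    static void Main()
    {
        // dy/dt = -2*y with y(0) = 1; the exact solution is exp(-2*t).
        // The delegate is assumed to be invoked as f(t, y).
        Func<double, double, double> model = (t, y) => -2.0 * y;

        // FourthOrder(initial value, start time, end time, size of output array, model)
        double[] rk4 = RungeKutta.FourthOrder(1.0, 0.0, 1.0, 11, model);

        for (int i = 0; i < rk4.Length; i++)
        {
            double t = i * 0.1;   // 11 samples over [0, 1] gives a step of 0.1
            Console.WriteLine($"t={t:F1}  rk4={rk4[i]:F6}  exact={Math.Exp(-2.0 * t):F6}");
        }
    }
}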
+    <!-- Precision ordering helpers: the IsLarger and IsSmaller overloads comparing two
+         doubles to within a number of decimal places, an absolute or relative
+         accuracy, or a maximum number of floating-point steps (ULPs). Constants for
+         the binary widths and significant decimal places of single and double
+         precision, the Demmel and Higham style machine epsilons, the default
+         10 * 2^(-53) and 10 * 2^(-24) accuracies, plus Magnitude and unit-magnitude
+         scaling, the directional long and int reinterpretations of a double or float,
+         Increment and Decrement to neighbouring representable values, and the
+         ULP-count based CoerceZero overload. -->
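Increment, Decrement, EpsilonOf and CoerceZero from the Precision utilities can be exercised as follows; the extension-method spelling is an assumption about the Math.NET Numerics 3.x package, and the sketch is illustrative only:

using System;
using MathNet.Numerics;   // Precision extension methods (assumed API)

class PrecisionStepDemo
{
    static void Main()
    {
        double x = 1.0;

        // Neighbouring representable doubles around 1.0.
        double up = x.Increment();      // next larger double
        double down = x.Decrement();    // next smaller double
        Console.WriteLine($"up={up:R} down={down:R}");

        // Relative spacing of doubles near a given value.
        double big = 1e6;
        Console.WriteLine($"EpsilonOf(1e6)={big.EpsilonOf():R}");

        // Force a value that is numerically almost zero to exactly zero.
        double noise = 1e-17;
        Console.WriteLine($"CoerceZero(1e-17)={noise.CoerceZero()}");
    }
}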
+ + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. 
See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. 
[Continued Precision documentation:
 - default-tolerance equality checks for doubles and Complex values (the source text misspells "Complex" as "Compex"): equal when they differ by no more than 10 * 2^(-52);
 - comparison to a given number of decimal places, both as an absolute measure (difference smaller than 0.5 * 10^(-decimalPlaces), so e.g. with 2 places 0.01 matches values between 0.005 and 0.015 but not 0.00 or 0.02) and as a relative measure that falls back to an absolute test when the numbers are very close to zero;
 - ULP-based comparison that counts the discrete floating-point steps between two values and accepts them if that count is within a tolerance of at least 1 (the method described at http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm, adapted to .NET without pointers or unsafe code as explained at http://www.extremeoptimization.com/resources/Articles/FPDotNetConceptsAndFormats.aspx), for double and float;
 - element-wise comparison of lists of doubles, by maximum error or by decimal places;
 - comparison of vectors and matrices, by maximum error and by decimal places in both the absolute and the near-zero-aware relative variants;
 - an IPrecisionSupport interface exposing a norm of a value (for measuring closeness to zero) and a norm of the difference of two values;
 - the heading of an enumeration describing the consistency-versus-performance trade-off between runs on different machines, whose values follow.]
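The decimal-places and ULP-based comparisons summarised above differ in what "close" means. A small sketch, assuming extension overloads named AlmostEqual(decimalPlaces) and AlmostEqualNumbersBetween with the semantics described; both names are an assumption here, since the member names were stripped from the text:

    using System;
    using MathNet.Numerics;

    class ComparisonSketch
    {
        static void Main()
        {
            double a = 1.0000001, b = 1.0000002;

            // Decimal-places comparison: equal to 6 places, but not to 8.
            Console.WriteLine(a.AlmostEqual(b, 6));                       // True
            Console.WriteLine(a.AlmostEqual(b, 8));                       // False

            // ULP-based comparison: accept values at most N representable doubles apart.
            double c = 1.0;
            double next = BitConverter.Int64BitsToDouble(BitConverter.DoubleToInt64Bits(c) + 1); // adjacent double
            Console.WriteLine(c.AlmostEqualNumbersBetween(next, 1));      // True: one step apart
            Console.WriteLine(c.AlmostEqualNumbersBetween(c + 1e-15, 1)); // False: several steps apart
        }
    }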
[Provider and low-level linear algebra documentation:
 - values of the consistency/performance enumeration: consistent on the same CPU only (maximum performance), on SSE2-compatible CPUs (maximum compatibility), and on CPUs supporting SSE2, SSE4.2, AVX or AVX2 and later;
 - FFT provider selection: use the best provider available, or a specific provider configured e.g. via the "MathNetNumericsFFTProvider" environment variable, with a probe ("available, at least in principle") and an initialize-and-verify step that falls back to alternatives such as the managed provider;
 - the Transpose enumeration (don't transpose, transpose, conjugate transpose; a conjugate transpose of a real matrix is simply a transpose) and the matrix Norm enumeration (1-norm, Frobenius norm, infinity norm, largest absolute value);
 - the linear algebra provider interface, which works on 1-D arrays of Double, Single, Complex and Complex32 and documents: scaled vector addition (AXPY), array scaling (SCAL), conjugation, dot product (DOT), pointwise add/subtract/multiply/divide/power, matrix norms, matrix multiplication (a simplified GEMM with alpha = 1 and beta = 0, no transposition) and multiply-with-update (c = alpha*op(a)*op(b) + beta*c), LUP factorization, inversion and solves (GETRF/GETRI/GETRS), Cholesky factorization and solves (POTRF/POTRS), full and thin QR factorizations and solves (GEQRF/ORGQR), and singular value decomposition with SVD-based solves (GESVD).]
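These provider routines are array-level building blocks; user code such as the projects in this solution would normally reach them through the high-level Matrix<T> factorization API, which dispatches to whichever provider is active. A minimal sketch using the standard MathNet.Numerics 3.x MathNet.Numerics.LinearAlgebra types; the sample matrix and vector are illustrative:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class SolveSketch
    {
        static void Main()
        {
            // A small symmetric positive definite system A*x = b (values are illustrative).
            var A = Matrix<double>.Build.DenseOfArray(new double[,]
            {
                { 4.0, 1.0, 0.0 },
                { 1.0, 3.0, 1.0 },
                { 0.0, 1.0, 2.0 },
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

            // Each factorization below ultimately runs through provider routines like those
            // documented here (LU -> GETRF/GETRS, Cholesky -> POTRF/POTRS, QR -> GEQRF/ORGQR, SVD -> GESVD).
            var xLu       = A.LU().Solve(b);
            var xCholesky = A.Cholesky().Solve(b);  // valid because A is symmetric positive definite
            var xQr       = A.QR().Solve(b);
            var xSvd      = A.Svd().Solve(b);

            Console.WriteLine(xLu);
            Console.WriteLine((xLu - xCholesky).L2Norm()); // the factorizations agree up to rounding
            Console.WriteLine((xLu - xQr).L2Norm());
            Console.WriteLine((xLu - xSvd).L2Norm());
        }
    }

Solving through the factorization objects keeps the provider choice (managed or native) an implementation detail of the library.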
[Continuation of the provider documentation: solving with a previously computed SVD; eigenvalue/eigenvector decomposition of symmetric and non-symmetric matrices (eigenvalues returned in ascending order, plus the block-diagonal eigenvalue matrix); linear algebra provider selection (use the best provider available, or a specific provider configured e.g. via the "MathNetNumericsLAProvider" environment variable); and the managed linear algebra provider, whose member documentation repeats the interface entries above once per supported data type (Double, Single, Complex, Complex32) and adds managed-only details: a cache-oblivious matrix multiplication kernel, a Cholesky step helper, QR column/work-array helpers, remarks noting which internal overloads assume their inputs are already transposed, and availability / initialize-and-verify notes for native providers with fall-back to the managed one.]
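Provider selection, as described above, happens at runtime. A hedged sketch of pinning the managed provider, or attempting a native one and falling back, assuming the MathNet.Numerics 3.x Control class exposes UseManaged, UseNativeMKL and LinearAlgebraProvider as in released 3.x versions; the exception handling is deliberately broad because the exact failure type is not specified in the text above:

    using System;
    using MathNet.Numerics;

    class ProviderSketch
    {
        static void Main()
        {
            // Pin the portable managed provider (always available).
            Control.UseManaged();
            Console.WriteLine(Control.LinearAlgebraProvider);

            // Try a native provider and fall back to managed if it is not installed.
            try
            {
                Control.UseNativeMKL();
            }
            catch (Exception)
            {
                Control.UseManaged();
            }
            Console.WriteLine(Control.LinearAlgebraProvider);
        }
    }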
+ + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. 
Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. 
+ + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. 
+ + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. 
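The element-wise kernels and the GEMM-style multiply described above map onto operators and the Pointwise* methods of the high-level types. The alpha/beta update below is spelled out with operators rather than a raw GEMM call, so it illustrates the contract, not the provider code:

```csharp
using MathNet.Numerics.LinearAlgebra;

var x = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
var y = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

var sum = x + y;                    // point-wise add
var prod = x.PointwiseMultiply(y);  // point-wise multiply

var a = Matrix<double>.Build.Random(3, 2);
var b = Matrix<double>.Build.Random(2, 4);
var c = Matrix<double>.Build.Random(3, 4);

// c = alpha*op(a)*op(b) + beta*c with op = identity, alpha = 2, beta = 0.5
double alpha = 2.0, beta = 0.5;
var updated = alpha * (a * b) + beta * c;
```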
+ The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. 
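The POTRF/POTRS-style routines require a symmetric positive definite matrix, and the factorization can be cached and reused for several right-hand sides. A minimal sketch using the standard Cholesky() extension:

```csharp
using MathNet.Numerics.LinearAlgebra;

// Symmetric positive definite matrix (required by a POTRF-style factorization).
var A = Matrix<double>.Build.DenseOfArray(new double[,]
{
    { 4.0, 2.0 },
    { 2.0, 3.0 }
});

var chol = A.Cholesky();  // factor once (POTRF)
var x1 = chol.Solve(Vector<double>.Build.Dense(new[] { 1.0, 0.0 }));  // POTRS
var x2 = chol.Solve(Vector<double>.Build.Dense(new[] { 0.0, 1.0 }));  // reuse the factor
```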
+ The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. 
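The DROTG-style Givens rotation described above can be sketched in a few lines. This version omits the BLAS sign conventions and the reconstruction parameter z, so treat it as illustrative only:

```csharp
using System;

// Given (a, b), find c and s so that c*a + s*b = r and -s*a + c*b = 0,
// i.e. the rotation zeroes the second coordinate.
static void GivensRotation(double a, double b, out double c, out double s, out double r)
{
    if (b == 0.0)
    {
        c = 1.0; s = 0.0; r = a;
        return;
    }

    r = Math.Sqrt(a * a + b * b);
    c = a / r;
    s = b / r;
}
```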
The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Random number generator using Mersenne Twister 19937 algorithm. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. 
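The multiplicative congruential generators documented above are simple enough to write out. A self-contained sketch of the Mcg31m1 recurrence (modulus 2^31 − 1, multiplier 1132489760); the library class additionally handles thread safety:

```csharp
// x_{n+1} = a * x_n mod m, with m = 2^31 - 1 and a = 1132489760,
// mapped to a double in [0, 1) by dividing by m.
sealed class Mcg31m1Sketch
{
    const ulong Modulus = 2147483647;    // 2^31 - 1
    const ulong Multiplier = 1132489760;
    ulong _x;

    public Mcg31m1Sketch(uint seed)
        => _x = seed % Modulus == 0 ? 1UL : seed % Modulus;  // zero seed mapped to one, as documented

    public double NextDouble()
    {
        _x = (Multiplier * _x) % Modulus;
        return _x / (double)Modulus;
    }
}
```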
+ + The seed value. + Uses the value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A 32-bit combined multiple recursive generator with 2 components of order 3. + + Based off of P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research, 44, 5 (1996), 816--822. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Represents a Parallel Additive Lagged Fibonacci pseudo-random number generator. + + + The type bases upon the implementation in the + Boost Random Number Library. + It uses the modulus 232 and by default the "lags" 418 and 1279. Some popular pairs are presented on + Wikipedia - Lagged Fibonacci generator. + + + + + Default value for the ShortLag + + + + + Default value for the LongLag + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. 
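Typical use of the Mersenne Twister generator goes through the MathNet.Numerics.Random namespace; NextDoubles is assumed here to be the bulk-generation method the documentation above refers to:

```csharp
using MathNet.Numerics.Random;

// An explicit seed gives a reproducible stream; the shared thread-safe
// instance is available as MersenneTwister.Default.
var mt = new MersenneTwister(42);

double u = mt.NextDouble();             // uniform in [0, 1)
int k = mt.Next(10);                    // integer in [0, 10)
double[] block = mt.NextDoubles(1000);  // bulk generation
```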
Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The ShortLag value + TheLongLag value + + + + Gets the short lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Gets the long lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Stores an array of random numbers + + + + + Stores an index for the random number array element that will be accessed next. + + + + + Fills the array with new unsigned random numbers. + + + Generated random numbers are 32-bit unsigned integers greater than or equal to 0 + and less than or equal to . + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + This class implements extension methods for the System.Random class. The extension methods generate + pseudo-random distributed numbers for types other than double and int32. + + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random bytes. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers greater than or equal to zero and less than . + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers within the specified range. + + The random number generator. + The array to fill with random values. + Lower bound, inclusive. + Upper bound, exclusive. 
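The additive lagged Fibonacci step with the default lags 418 and 1279 can be sketched with a circular buffer. The seeding below is a placeholder, not the library's initialization:

```csharp
using System;

// x_n = (x_{n-shortLag} + x_{n-longLag}) mod 2^32, default lags 418 and 1279.
sealed class LaggedFibonacciSketch
{
    const int ShortLag = 418;
    const int LongLag = 1279;
    readonly uint[] _x = new uint[LongLag];
    int _i;

    public LaggedFibonacciSketch(Random seeder)
    {
        for (int j = 0; j < LongLag; j++)
            _x[j] = (uint)seeder.Next();  // placeholder seeding, for illustration only
    }

    public double NextDouble()
    {
        int shortIndex = (_i + LongLag - ShortLag) % LongLag;
        _x[_i] = unchecked(_x[_i] + _x[shortIndex]);   // addition mod 2^32
        double value = _x[_i] * (1.0 / 4294967296.0);  // map to [0, 1)
        _i = (_i + 1) % LongLag;
        return value;
    }
}
```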
+ + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative random number less than . + + The random number generator. + + A 64-bit signed integer greater than or equal to 0, and less than ; that is, + the range of return values includes 0 but not . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int32 range. + + The random number generator. + + A 32-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int64 range. + + The random number generator. + + A 64-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative decimal floating point random number less than 1.0. + + The random number generator. + + A decimal floating point number greater than or equal to 0.0, and less than 1.0; that is, + the range of return values includes 0.0 but not 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random boolean. + + The random number generator. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Provides a time-dependent seed value, matching the default behavior of System.Random. + WARNING: There is no randomness in this seed and quick repeated calls can cause + the same seed value. Do not use for cryptography! + + + + + Provides a seed based on time and unique GUIDs. + WARNING: There is only low randomness in this seed, but at least quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Provides a seed based on an internal random number generator (crypto if available), time and unique GUIDs. + WARNING: There is only medium randomness in this seed, but quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Base class for random number generators. This class introduces a layer between + and the Math.Net Numerics random number generators to provide thread safety. + When used directly it use the System.Random as random number source. + + + + + Initializes a new instance of the class using + the value of to set whether + the instance is thread safe or not. + + + + + Initializes a new instance of the class. + + if set to true , the class is thread safe. + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The array to fill with random values. 
+ + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The size of the array to fill. + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than . + + + + + Returns a random number less then a specified maximum. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + A 32-bit signed integer less than . + is zero or negative. + + + + Returns a random number within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + A 32-bit signed integer greater than or equal to and less than ; that is, the range of return values includes but not . If equals , is returned. + + is greater than . + + + + Fills an array with random 32-bit signed integers greater than or equal to zero and less than . + + The array to fill with random values. + + + + Returns an array with random 32-bit signed integers greater than or equal to zero and less than . + + The size of the array to fill. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an infinite sequence of random 32-bit signed integers greater than or equal to zero and less than . + + + + + Returns an infinite sequence of random numbers within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Fills the elements of a specified array of bytes with random numbers. + + An array of bytes to contain random numbers. + is null. + + + + Returns a random number between 0.0 and 1.0. + + A double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than 2147483647 (). + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random N-bit signed integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 32 (not verified). + + + + + Returns a random N-bit signed long integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 64 (not verified). 
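The Next(minInclusive, maxExclusive) contract described above (lower bound included, upper bound excluded) is commonly implemented by scaling a uniform double. The helper below is hypothetical and only illustrates that contract:

```csharp
using System;

// Sketch, not the library's code: map u in [0, 1) onto [minInclusive, maxExclusive).
static int NextInRange(Func<double> nextDouble, int minInclusive, int maxExclusive)
{
    if (maxExclusive <= minInclusive)
        throw new ArgumentOutOfRangeException(nameof(maxExclusive));

    double u = nextDouble();                         // u in [0, 1)
    long range = (long)maxExclusive - minInclusive;  // long avoids int overflow
    return (int)(minInclusive + (long)(u * range));  // lands in [min, max)
}

// e.g. NextInRange(rng.NextDouble, 0, 6) simulates a fair die roll 0..5.
```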
+ + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + + + + Construct a new random number generator with random seed. + + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The seed value. + + + + Construct a new random number generator with random seed. + + The seed value. + if set to true , the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fill an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 1982 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: + An efficient and portable pseudo-random number generator". Applied Statistics 31 (1982) 188-190 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. 
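The System.Random-backed source exposes a shared thread-safe instance; a one-line usage sketch (NextDoubles assumed to be the bulk method mentioned above):

```csharp
using MathNet.Numerics.Random;

// Shared, thread-safe instance backed by System.Random; note the warning above
// about its comparatively short sequence length for bulk generation.
double u = SystemRandomSource.Default.NextDouble();
double[] batch = SystemRandomSource.Default.NextDoubles(100);
```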
+ + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 2006 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers". + Computational Statistics & Data Analysis 51:3 (2006) 1614-1622 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Implements a multiply-with-carry Xorshift pseudo random number generator (RNG) specified in Marsaglia, George. (2003). Xorshift RNGs. + Xn = a * Xn−3 + c mod 2^32 + http://www.jstatsoft.org/v08/i14/paper + + + + + The default value for X1. + + + + + The default value for X2. + + + + + The default value for the multiplier. + + + + + The default value for the carry over. + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Seed or last but three unsigned random number. + + + + + Last but two unsigned random number. + + + + + Last but one unsigned random number. + + + + + The value of the carry over. + + + + + The multiplier. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. 
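The 1982 Wichmann-Hill generator combines three small multiplicative congruential streams; the constants below are the ones published in AS 183 and are quoted here for illustration only:

```csharp
using System;

// Three small MCG streams, combined by summing and taking the fractional part.
sealed class WichmannHill1982Sketch
{
    int _s1 = 1, _s2 = 1, _s3 = 1;   // seeds must be non-zero

    public double NextDouble()
    {
        _s1 = (171 * _s1) % 30269;
        _s2 = (172 * _s2) % 30307;
        _s3 = (170 * _s3) % 30323;

        double u = _s1 / 30269.0 + _s2 / 30307.0 + _s3 / 30323.0;
        return u - Math.Floor(u);    // fractional part, in [0, 1)
    }
}
```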
+ Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Note: must be less than . + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Bisection root-finding algorithm. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. 
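A self-contained sketch of the multiply-with-carry step documented above, using the stated defaults a = 916905990, c = 13579, X1 = 77465321, X2 = 362436069; which state slot the seed replaces is an assumption of this sketch:

```csharp
// t = a * x_{n-3} + c;  x_n = t mod 2^32;  carry = t / 2^32.
sealed class MwcXorshiftSketch
{
    const ulong A = 916905990;
    ulong _x1 = 77465321, _x2 = 362436069, _x3, _c = 13579;

    public MwcXorshiftSketch(uint seed) => _x3 = seed == 0 ? 1u : seed;  // seed slot assumed

    public double NextDouble()
    {
        ulong t = A * _x1 + _c;          // 64-bit product plus carry
        _c = t >> 32;                    // new carry: the high 32 bits
        ulong xNew = t & 0xFFFFFFFFUL;   // new value: the low 32 bits

        _x1 = _x2; _x2 = _x3; _x3 = xNew;
        return xNew * (1.0 / 4294967296.0);  // map to [0, 1)
    }
}
```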
+ The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy for both the root and the function value at the root. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Algorithm by by Brent, Van Wijngaarden, Dekker et al. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Helper method useful for preventing rounding errors. + a*sign(b) + + + + Algorithm by Broyden. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. 
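The bracketed root finders above are exposed as static FindRoot methods in MathNet.Numerics.RootFinding; a short usage sketch with positional accuracy and iteration arguments:

```csharp
using System;
using MathNet.Numerics.RootFinding;

// f(x) = x^2 - 2 has a single root, sqrt(2), inside the bracket [0, 2].
Func<double, double> f = x => x * x - 2.0;

double byBisection = Bisection.FindRoot(f, 0.0, 2.0, 1e-8, 100);
double byBrent = Brent.FindRoot(f, 0.0, 2.0, 1e-8, 100);
```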
+ The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Helper method to calculate an approximation of the Jacobian. + + The function. + The argument (initial guess). + The result (of initial guess). + + + + Finds roots to the cubic equation x^3 + a2*x^2 + a1*x + a0 = 0 + Implements the cubic formula in http://mathworld.wolfram.com/CubicFormula.html + + + + + Q and R are transformed variables. + + + + + n^(1/3) - work around a negative double raised to (1/3) + + + + + Find all real-valued roots of the cubic equation a0 + a1*x + a2*x^2 + x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Pure Newton-Raphson root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Robust Newton-Raphson root-finding algorithm that falls back to bisection when overshooting or converging too slow, or to subdivision on lacking bracketing. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. 
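The pure Newton-Raphson iteration described above (abort as soon as the iterate leaves the bracket) is easy to sketch; the convergence test on |f(x)| below is a simplification of the documented accuracy criterion:

```csharp
using System;

// x_{k+1} = x_k - f(x_k)/f'(x_k), aborting if the iterate leaves [lower, upper].
static bool TryNewtonRaphson(
    Func<double, double> f, Func<double, double> df,
    double guess, double lower, double upper,
    double accuracy, int maxIterations, out double root)
{
    double x = guess;
    for (int i = 0; i < maxIterations; i++)
    {
        double fx = f(x);
        if (Math.Abs(fx) < accuracy) { root = x; return true; }

        x -= fx / df(x);
        if (x < lower || x > upper) break;   // left the bracketing interval
    }

    root = double.NaN;
    return false;
}
```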
+ Maximum number of iterations. Default 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Default 20. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Example: 20. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Pure Secant root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false + + + Detect a range containing at least one root. + The function to detect roots from. + Lower value of the range. + Upper value of the range + The growing factor of research. Usually 1.6. + Maximum number of iterations. Usually 50. + True if the bracketing operation succeeded, false otherwise. + This iterative methods stops when two values with opposite signs are found. + + + + Sorting algorithms for single, tuple and triple lists. + + + + + Sort a list of keys, in place using the quick sort algorithm using the quick sort algorithm. + + The type of elements in the key list. + List to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. 
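The secant update that the algorithm above is built on replaces the derivative in Newton's method with a finite difference; a one-step sketch:

```csharp
using System;

// x_{k+1} = x_k - f(x_k) * (x_k - x_{k-1}) / (f(x_k) - f(x_{k-1}))
static double SecantStep(Func<double, double> f, double xPrev, double xCurr)
{
    double fPrev = f(xPrev);
    double fCurr = f(xCurr);
    return xCurr - fCurr * (xCurr - xPrev) / (fCurr - fPrev);
}
```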
+ The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a range of a list of keys, in place using the quick sort algorithm. + + The type of element in the list. + List to sort. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the primary list. + The type of elements in the secondary list. + List to sort. + List to sort on duplicate primary items, and permute the same way as the key list. + Comparison, defining the primary sort order. + Comparison, defining the secondary sort order. + + + + Recursive implementation for an in place quick sort on a list. + + The type of the list on which the quick sort is performed. + The list which is sorted using quick sort. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on a list while reordering one other list accordingly. + + The type of the list on which the quick sort is performed. + The type of the list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on one list while reordering two other lists accordingly. + + The type of the list on which the quick sort is performed. + The type of the first list which is automatically reordered accordingly. + The type of the second list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The first list which is automatically reordered accordingly. + The second list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on the primary and then by the secondary list while reordering one secondary list accordingly. + + The type of the primary list. + The type of the secondary list. + The list which is sorted using quick sort. 
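The keyed sort described above permutes the item list alongside the keys. The Sorting.Sort(keys, items) overload is assumed here; for plain arrays, Array.Sort(keys, items) has the same effect:

```csharp
using System.Collections.Generic;
using MathNet.Numerics;

// Sort by a scalar key and carry a payload list along, as the keyed quick sort does.
var keys = new List<double> { 0.7, 0.1, 0.4 };
var items = new List<int> { 10, 20, 30 };

Sorting.Sort(keys, items);   // keys -> 0.1, 0.4, 0.7; items -> 20, 30, 10
```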
+ The list which is sorted secondarily (on primary duplicates) and automatically reordered accordingly. + The method with which to compare two elements of the primary list. + The method with which to compare two elements of the secondary list. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Performs an in place swap of two elements in a list. + + The type of elements stored in the list. + The list in which the elements are stored. + The index of the first element of the swap. + The index of the second element of the swap. + + + + This partial implementation of the SpecialFunctions class contains all methods related to the error function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the harmonic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the logistic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + + + Computes the logarithm of the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The logarithm of the Euler Beta function evaluated at z,w. + If or are not positive. + + + + Computes the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The Euler Beta function evaluated at z,w. + If or are not positive. + + + + Returns the lower incomplete (unregularized) beta function + B(a,b,x) = int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The lower incomplete (unregularized) beta function. + + + + Returns the regularized lower incomplete beta function + I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The regularized lower incomplete beta function. + + + + ************************************** + COEFFICIENTS FOR METHOD ErfImp * + ************************************** + + Polynomial coefficients for a numerator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for adenominator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. 
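The Beta-function routines documented above are exposed on the SpecialFunctions static class; a short usage sketch:

```csharp
using MathNet.Numerics;

double z = 2.5, w = 3.0;

double beta = SpecialFunctions.Beta(z, w);       // Euler Beta
double logBeta = SpecialFunctions.BetaLn(z, w);  // log form, avoids overflow for large arguments

// Regularized lower incomplete beta I_x(a, b), with x in [0, 1].
double p = SpecialFunctions.BetaRegularized(2.0, 5.0, 0.3);
```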
+ + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + + ************************************** + COEFFICIENTS FOR METHOD ErfInvImp * + ************************************** + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. 
+ + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. + returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! 
using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. 
+ The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
+ + Returns the modified Bessel function of the first kind, + order 1 of the argument. +

+ The function is defined as i1(x) = -i j1( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
+ + Returns the modified Bessel function of the second kind + of order 0 of the argument. +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 0 of the argument. + + The value to compute the bessel function of. + + + + Returns the modified Bessel function of the second kind + of order 1 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the Bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 1 of the argument. +

+ k1e(x) = exp(x) * k1(x). +

+ The value to compute the Bessel function of. + +
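Editor's note: the modified Bessel entries above are exposed by MathNet.Numerics, which the projects in this solution reference. A minimal consumption sketch in C#, assuming the SpecialFunctions method names BesselI0/BesselI1/BesselK0/BesselK0e/BesselK1/BesselK1e as found in MathNet.Numerics 3.16 (verify against the referenced MathNet.Numerics.dll before relying on them):

```csharp
// Sketch only: assumes MathNet.Numerics exposes these Bessel helpers on the
// static SpecialFunctions class; confirm against the referenced assembly.
using System;
using MathNet.Numerics;

class BesselDemo
{
    static void Main()
    {
        double x = 2.5;
        Console.WriteLine(SpecialFunctions.BesselI0(x));  // modified Bessel I0(x)
        Console.WriteLine(SpecialFunctions.BesselI1(x));  // modified Bessel I1(x)
        Console.WriteLine(SpecialFunctions.BesselK0(x));  // modified Bessel K0(x)
        Console.WriteLine(SpecialFunctions.BesselK1(x));  // modified Bessel K1(x)

        // Exponentially scaled variants, per the entries above:
        // k0e(x) = exp(x) * k0(x), k1e(x) = exp(x) * k1(x).
        Console.WriteLine(SpecialFunctions.BesselK0e(x));
        Console.WriteLine(SpecialFunctions.BesselK1e(x));
    }
}
```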
+ + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = {\sum_{i=0}^{N-1}}'\, \mathrm{coef}[i]\, T_i(x/2)
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must + have been transformed to x -> 2(2x - b - a)/(b-a) before + entering the routine. This maps x from (a, b) to (-1, 1), + over which the Chebyshev polynomials are defined. +

+ If the coefficients are for the inverted interval, in + which (a, b) is mapped to (1/b, 1/a), the transformation + required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + this becomes x -> 4a/x - 1. +

+ SPEED: +

+ Taking advantage of the recurrence properties of the + Chebyshev polynomials, the routine requires one more + addition per loop than evaluating a nested polynomial of + the same degree. +

+ The coefficients of the polynomial. + Argument to the polynomial. + + Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs +

+ Marked as Deprecated in + http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html + + + +
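Editor's note: the primed sum above follows the Cephes convention of taking the i = 0 coefficient with weight 1/2, with coefficients stored highest order first (zero-order term last). A minimal C# sketch of the three-term recurrence this entry describes, illustrative only and not the library's exact implementation:

```csharp
// Illustrative sketch of Cephes-style Chebyshev series evaluation.
// coef holds N coefficients in reverse order (zero-order term last);
// the series is sum'_{i=0}^{N-1} coef[i] * T_i(x/2). The recurrence uses x
// directly because T_{i+1}(x/2) = x * T_i(x/2) - T_{i-1}(x/2), and the final
// 0.5 * (b0 - b2) realises the half-weighted zero-order (primed-sum) term.
static double ChebyshevSeries(double x, double[] coef)
{
    double b0 = coef[0], b1 = 0.0, b2 = 0.0;
    for (int i = 1; i < coef.Length; i++)
    {
        b2 = b1;
        b1 = b0;
        b0 = x * b1 - b2 + coef[i];
    }
    return 0.5 * (b0 - b2);
}
```

As the entry notes, x must first be mapped onto (-1, 1) (or the inverted-interval equivalent) before calling such a routine.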

+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification. + + The no. of terms in the sequence. + The coefficients of the Chebyshev series, length n+1. + The value at which the series is to be evaluated. + + ORIGINAL AUTHOR: + Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics, University of Paisley; High St., PAISLEY, SCOTLAND + REFERENCES: + "An error analysis of the modified Clenshaw method for evaluating Chebyshev and Fourier series" + J. Oliver, J.I.M.A., vol. 20, 1977, pp379-391 + +
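Editor's note: for comparison with the Clenshaw/Reinsch entry above, the unmodified Clenshaw recurrence for sum_{k=0}^{n} c_k T_k(x) is sketched below. The Reinsch modification cited in the entry, which reduces cancellation for x near ±1, is intentionally omitted, and the full-weight c_0 convention is an assumption rather than the library's documented behaviour:

```csharp
// Plain Clenshaw summation of sum_{k=0}^{n} c[k] * T_k(x) for -1 <= x <= 1.
// Sketch only: the Reinsch-modified variant documented above rewrites this
// recurrence to stay accurate when x is close to +1 or -1.
static double ClenshawChebyshev(double[] c, double x)
{
    double bk1 = 0.0, bk2 = 0.0;            // b_{k+1}, b_{k+2}
    for (int k = c.Length - 1; k >= 1; k--)
    {
        double bk = 2.0 * x * bk1 - bk2 + c[k];
        bk2 = bk1;
        bk1 = bk;
    }
    return x * bk1 - bk2 + c[0];            // full-weight c[0] convention assumed
}
```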
+ + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. 
+ Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. 
+ On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. 
+ + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. 
+ + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. + + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. 
+ + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. + When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. 
+ + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. + + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. 
+ The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. + + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. 
+ The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. 
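As a rough usage sketch of the Metropolis sampler documented above: sampling a standard normal with a symmetric Gaussian random-walk proposal. The class and delegate names (MetropolisSampler, DensityLn, the Sample(int) overload, the AcceptanceRate property) and the constructor argument order are assumptions based on the descriptions in this file and on MathNet.Numerics' public Mcmc API; they are not confirmed by the diff itself.

    using System;
    using MathNet.Numerics.Distributions;
    using MathNet.Numerics.Statistics.Mcmc;

    class MetropolisDemo
    {
        static void Main()
        {
            var rng = new Random(1);

            // Log density of the target distribution P (standard normal, up to an additive constant).
            DensityLn<double> logDensity = x => -0.5 * x * x;

            // Symmetric local proposal: a small Gaussian step around the current sample.
            double Proposal(double x) => Normal.Sample(rng, x, 0.5);

            // Initial sample 0.0, 10 burn iterations between returned samples (assumed argument order).
            var sampler = new MetropolisSampler<double>(0.0, logDensity, Proposal, 10);

            double[] samples = sampler.Sample(1000);
            Console.WriteLine($"acceptance rate: {sampler.AcceptanceRate:F2}, first sample: {samples[0]:F3}");
        }
    }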
+ The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. 
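The "simple three point estimation" named above as the default differentiation method is a central difference over the points x - h, x and x + h; a standalone sketch of that estimate (the name Estimate and the choice of step h are illustrative, not the library's):

    using System;

    static class ThreePointDerivative
    {
        // Central (three point) difference: f'(x) ~ (f(x + h) - f(x - h)) / (2h).
        public static double Estimate(Func<double, double> f, double x, double h = 1e-5)
            => (f(x + h) - f(x - h)) / (2.0 * h);

        static void Main()
        {
            // Derivative of the standard normal log density -x^2/2 is -x.
            Func<double, double> logDensity = x => -0.5 * x * x;
            Console.WriteLine(Estimate(logDensity, 1.5));   // approximately -1.5
        }
    }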
+ The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. 
+ + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. 
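A short usage sketch of the running-statistics accumulator documented above, filled on two partitions and then combined. The type and member names (RunningStatistics, Push, PushRange, Combine and the listed properties) are assumed from these descriptions and from MathNet.Numerics' public statistics API rather than from the diff, which does not show member names.

    using System;
    using MathNet.Numerics.Statistics;

    class RunningStatisticsDemo
    {
        static void Main()
        {
            // Two accumulators filled independently (e.g. on two data partitions)...
            var left = new RunningStatistics(new[] { 1.0, 2.0, 3.0 });
            var right = new RunningStatistics();
            right.PushRange(new[] { 4.0, 5.0 });
            right.Push(6.0);

            // ...and then combined, as described above, without revisiting the raw data.
            var combined = RunningStatistics.Combine(left, right);

            Console.WriteLine($"n = {combined.Count}, mean = {combined.Mean}");
            Console.WriteLine($"sample sd = {combined.StandardDeviation}, population sd = {combined.PopulationStandardDeviation}");
        }
    }

Combining accumulators instead of concatenating raw samples is what makes this kind of type suitable for chunked or parallel processing.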
+ + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
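A usage sketch for the sorted-array statistics described above, under the assumption that they live in a SortedArrayStatistics class with methods named after these summaries (Median, Quantile, Percentile, InterquartileRange); as every entry above repeats, the caller must sort the data ascendingly first.

    using System;
    using MathNet.Numerics.Statistics;

    class SortedStatsDemo
    {
        static void Main()
        {
            double[] data = { 7.0, 1.0, 3.0, 9.0, 5.0 };
            Array.Sort(data);   // these routines require ascending order and do not sort for you

            Console.WriteLine(SortedArrayStatistics.Median(data));              // R8 estimate
            Console.WriteLine(SortedArrayStatistics.Quantile(data, 0.25));      // tau in [0, 1]
            Console.WriteLine(SortedArrayStatistics.InterquartileRange(data));
            Console.WriteLine(SortedArrayStatistics.Percentile(data, 90));      // p in 0..100
        }
    }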
+ + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. 
+ Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
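The geometric and harmonic means documented above reduce to the exponential of the mean of the logarithms and to N divided by the sum of reciprocals; a self-contained sketch of those formulas (not the library's implementation):

    using System;
    using System.Linq;

    class MeanFormulas
    {
        // Geometric mean: exp(mean of logs); harmonic mean: n / sum(1/x).
        static double GeometricMean(double[] xs) => Math.Exp(xs.Average(Math.Log));
        static double HarmonicMean(double[] xs) => xs.Length / xs.Sum(x => 1.0 / x);

        static void Main()
        {
            double[] xs = { 1.0, 4.0, 16.0 };
            Console.WriteLine(GeometricMean(xs));   // 4
            Console.WriteLine(HarmonicMean(xs));    // approximately 2.2857
        }
    }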
+ Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. 
+ Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + The full population data. + + + + Evaluates the skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + The full population data. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the unbiased population skewness and kurtosis from the provided samples in a single pass. + Uses a normalizer (Bessel's correction; type 2). + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness and kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + + The full population data. 
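A usage sketch contrasting the sample (N - 1 normalizer) and population (N normalizer) estimators described above. The extension method names (Mean, Variance, PopulationVariance, StandardDeviation, Skewness, Kurtosis) are assumed from these descriptions and from MathNet.Numerics.Statistics' public surface; the diff itself does not show them.

    using System;
    using MathNet.Numerics.Statistics;

    class DescriptiveStatsDemo
    {
        static void Main()
        {
            double[] data = { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };

            Console.WriteLine(data.Mean());                 // 5
            Console.WriteLine(data.Variance());             // N-1 normalizer (sample estimate)
            Console.WriteLine(data.PopulationVariance());   // N normalizer (full population)
            Console.WriteLine(data.StandardDeviation());
            Console.WriteLine(data.Skewness());             // needs at least three entries
            Console.WriteLine(data.Kurtosis());             // needs at least four entries
        }
    }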
+ + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + The full population data. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. 
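The distinction above between sample covariance (Bessel's correction, N - 1) and population covariance (N) comes down to the normalizer in one formula; a self-contained sketch of that formula, not the library code:

    using System;
    using System.Linq;

    class CovarianceFormula
    {
        // cov(x, y) = sum((x_i - mean(x)) * (y_i - mean(y))) / (N - 1)  -- sample estimate.
        // Use N instead of N - 1 for the population covariance.
        static double SampleCovariance(double[] x, double[] y)
        {
            double mx = x.Average(), my = y.Average();
            return x.Zip(y, (a, b) => (a - mx) * (b - my)).Sum() / (x.Length - 1);
        }

        static void Main()
        {
            double[] x = { 1.0, 2.0, 3.0, 4.0 };
            double[] y = { 2.0, 4.0, 6.0, 8.0 };
            Console.WriteLine(SampleCovariance(x, y));   // approximately 3.333
        }
    }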
+ The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. 
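A usage sketch for the quantile, percentile and five-number-summary estimators described above, which, unlike the sorted-array variants, accept unsorted data. The extension method names are assumed from the descriptions and from MathNet.Numerics' public API, not from the diff.

    using System;
    using MathNet.Numerics.Statistics;

    class QuantileDemo
    {
        static void Main()
        {
            double[] data = { 9.0, 1.0, 7.0, 3.0, 5.0 };

            Console.WriteLine(data.Median());                  // R8 estimate
            Console.WriteLine(data.Quantile(0.25));            // tau in [0, 1]
            Console.WriteLine(data.Percentile(90));            // p in 0..100
            Console.WriteLine(data.InterquartileRange());
            Console.WriteLine(string.Join(", ", data.FiveNumberSummary()));   // min, Q1, median, Q3, max
        }
    }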
+ Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + Null-entries are ignored. + + The data sample sequence. + + + + Evaluates the sample mean over a moving window, for each samples. + Returns NaN if no data is empty or if any entry is NaN. + + The sample stream to calculate the mean of. + The number of last samples to consider. + + + + Statistics operating on an IEnumerable in a single pass, without keeping the full data in memory. + Can be used in a streaming way, e.g. on large datasets not fitting into memory. + + + + + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. 
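The entropy described above, "of a stream of double values in bits", is the Shannon entropy of the empirical value frequencies; a self-contained sketch of that definition (illustrative, not necessarily how the library implements it):

    using System;
    using System.Linq;

    class EntropyFormula
    {
        // H = -sum(p_i * log2(p_i)) over the relative frequency p_i of each distinct value.
        static double EntropyBits(double[] data) =>
            data.GroupBy(v => v)
                .Select(g => (double)g.Count() / data.Length)
                .Sum(p => -p * Math.Log(p, 2));

        static void Main()
        {
            // Two equally frequent values carry exactly one bit of entropy.
            Console.WriteLine(EntropyBits(new[] { 0.0, 0.0, 1.0, 1.0 }));   // 1
        }
    }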
+ + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. 
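The "single pass without memoization" estimators above can be realised with a Welford-style update that keeps only a count, a mean and a sum of squared deviations; a minimal self-contained sketch of that idea (illustrative, not the library's internals):

    using System;

    class OnePassStatsDemo
    {
        static void Main()
        {
            double[] stream = { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };

            long n = 0;
            double mean = 0.0, m2 = 0.0;   // m2 accumulates the sum of squared deviations

            foreach (double x in stream)
            {
                n++;
                double delta = x - mean;
                mean += delta / n;
                m2 += delta * (x - mean);
            }

            double sampleVariance = n > 1 ? m2 / (n - 1) : double.NaN;   // N-1 normalizer
            double populationVariance = n > 0 ? m2 / n : double.NaN;     // N normalizer
            Console.WriteLine($"mean = {mean}, sample variance = {sampleVariance}, population variance = {populationVariance}");
        }
    }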
+ + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Calculates the entropy of a stream of double values. + Returns NaN if any of the values in the stream are NaN. + + The input stream to evaluate. + + + + + Used to simplify parallel code, particularly between the .NET 4.0 and Silverlight Code. + + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The body to be invoked for each iteration range. + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The partition size for splitting work into smaller pieces. + The body to be invoked for each iteration range. + + + + Executes each of the provided actions inside a discrete, asynchronous task. + + An array of actions to execute. + The actions array contains a null element. + At least one invocation of the actions threw an exception. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. 
+ The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Double-precision trigonometry toolkit. + + + + + Constant to convert a degree to grad. + + + + + Converts a degree (360-periodic) angle to a grad (400-periodic) angle. + + The degree to convert. + The converted grad angle. + + + + Converts a degree (360-periodic) angle to a radian (2*Pi-periodic) angle. + + The degree to convert. + The converted radian angle. + + + + Converts a grad (400-periodic) angle to a degree (360-periodic) angle. + + The grad to convert. + The converted degree. + + + + Converts a grad (400-periodic) angle to a radian (2*Pi-periodic) angle. + + The grad to convert. + The converted radian. + + + + Converts a radian (2*Pi-periodic) angle to a degree (360-periodic) angle. + + The radian to convert. + The converted degree. + + + + Converts a radian (2*Pi-periodic) angle to a grad (400-periodic) angle. + + The radian to convert. + The converted grad. + + + + Normalized Sinc function. sinc(x) = sin(pi*x)/(pi*x). + + + + + Trigonometric Sine of an angle in radian, or opposite / hypotenuse. + + The angle in radian. + The sine of the radian angle. + + + + Trigonometric Sine of a Complex number. + + The complex value. + The sine of the complex number. + + + + Trigonometric Cosine of an angle in radian, or adjacent / hypotenuse. + + The angle in radian. + The cosine of an angle in radian. + + + + Trigonometric Cosine of a Complex number. + + The complex value. + The cosine of a complex number. + + + + Trigonometric Tangent of an angle in radian, or opposite / adjacent. + + The angle in radian. + The tangent of the radian angle. + + + + Trigonometric Tangent of a Complex number. + + The complex value. + The tangent of the complex number. + + + + Trigonometric Cotangent of an angle in radian, or adjacent / opposite. Reciprocal of the tangent. + + The angle in radian. + The cotangent of an angle in radian. + + + + Trigonometric Cotangent of a Complex number. + + The complex value. + The cotangent of the complex number. + + + + Trigonometric Secant of an angle in radian, or hypotenuse / adjacent. Reciprocal of the cosine. + + The angle in radian. + The secant of the radian angle. + + + + Trigonometric Secant of a Complex number. + + The complex value. + The secant of the complex number. + + + + Trigonometric Cosecant of an angle in radian, or hypotenuse / opposite. Reciprocal of the sine. + + The angle in radian. + Cosecant of an angle in radian. + + + + Trigonometric Cosecant of a Complex number. + + The complex value. + The cosecant of a complex number. + + + + Trigonometric principal Arc Sine in radian + + The opposite for a unit hypotenuse (i.e. opposite / hyptenuse). + The angle in radian. + + + + Trigonometric principal Arc Sine of this Complex number. + + The complex value. + The arc sine of a complex number. + + + + Trigonometric principal Arc Cosine in radian + + The adjacent for a unit hypotenuse (i.e. adjacent / hypotenuse). 
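The trigonometry entries above (angle-unit conversions, normalized sinc, circular and reciprocal functions) would be exercised roughly as follows; the class and method names (Trig.DegreeToRadian and friends) are assumptions about the 3.16 API rather than anything spelled out in this hunk:

    using System;
    using MathNet.Numerics;

    class TrigSketch
    {
        static void Main()
        {
            // Angle-unit conversions: degree (360) <-> radian (2*pi) <-> grad (400).
            double rad = Trig.DegreeToRadian(60.0);
            double deg = Trig.RadianToDegree(Math.PI / 3.0);
            double grad = Trig.DegreeToGrad(90.0);   // expected: 100

            // Normalized sinc: sin(pi*x) / (pi*x).
            double s = Trig.Sinc(0.5);

            Console.WriteLine(rad);
            Console.WriteLine(deg);
            Console.WriteLine(grad);
            Console.WriteLine(s);
        }
    }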
+ The angle in radian. + + + + Trigonometric principal Arc Cosine of this Complex number. + + The complex value. + The arc cosine of a complex number. + + + + Trigonometric principal Arc Tangent in radian + + The opposite for a unit adjacent (i.e. opposite / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Tangent of this Complex number. + + The complex value. + The arc tangent of a complex number. + + + + Trigonometric principal Arc Cotangent in radian + + The adjacent for a unit opposite (i.e. adjacent / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cotangent of this Complex number. + + The complex value. + The arc cotangent of a complex number. + + + + Trigonometric principal Arc Secant in radian + + The hypotenuse for a unit adjacent (i.e. hypotenuse / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Secant of this Complex number. + + The complex value. + The arc secant of a complex number. + + + + Trigonometric principal Arc Cosecant in radian + + The hypotenuse for a unit opposite (i.e. hypotenuse / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cosecant of this Complex number. + + The complex value. + The arc cosecant of a complex number. + + + + Hyperbolic Sine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic sine of the angle. + + + + Hyperbolic Sine of a Complex number. + + The complex value. + The hyperbolic sine of a complex number. + + + + Hyperbolic Cosine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic Cosine of the angle. + + + + Hyperbolic Cosine of a Complex number. + + The complex value. + The hyperbolic cosine of a complex number. + + + + Hyperbolic Tangent in radian + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic tangent of the angle. + + + + Hyperbolic Tangent of a Complex number. + + The complex value. + The hyperbolic tangent of a complex number. + + + + Hyperbolic Cotangent + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cotangent of the angle. + + + + Hyperbolic Cotangent of a Complex number. + + The complex value. + The hyperbolic cotangent of a complex number. + + + + Hyperbolic Secant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic secant of the angle. + + + + Hyperbolic Secant of a Complex number. + + The complex value. + The hyperbolic secant of a complex number. + + + + Hyperbolic Cosecant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cosecant of the angle. + + + + Hyperbolic Cosecant of a Complex number. + + The complex value. + The hyperbolic cosecant of a complex number. + + + + Hyperbolic Area Sine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Sine of this Complex number. + + The complex value. + The hyperbolic arc sine of a complex number. + + + + Hyperbolic Area Cosine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosine of this Complex number. + + The complex value. + The hyperbolic arc cosine of a complex number. + + + + Hyperbolic Area Tangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Tangent of this Complex number. + + The complex value. + The hyperbolic arc tangent of a complex number. + + + + Hyperbolic Area Cotangent + + The real value. + The hyperbolic angle, i.e. 
the area of its hyperbolic sector. + + + + Hyperbolic Area Cotangent of this Complex number. + + The complex value. + The hyperbolic arc cotangent of a complex number. + + + + Hyperbolic Area Secant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Secant of this Complex number. + + The complex value. + The hyperbolic arc secant of a complex number. + + + + Hyperbolic Area Cosecant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosecant of this Complex number. + + The complex value. + The hyperbolic arc cosecant of a complex number. + + + + Hamming window. Named after Richard Hamming. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hamming window. Named after Richard Hamming. + Periodic version, useful e.g. for FFT purposes. + + + + + Hann window. Named after Julius von Hann. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hann window. Named after Julius von Hann. + Periodic version, useful e.g. for FFT purposes. + + + + + Cosine window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Cosine window. + Periodic version, useful e.g. for FFT purposes. + + + + + Lanczos window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Lanczos window. + Periodic version, useful e.g. for FFT purposes. + + + + + Gauss window. + + + + + Blackman window. + + + + + Blackman-Harris window. + + + + + Blackman-Nuttall window. + + + + + Bartlett window. + + + + + Bartlett-Hann window. + + + + + Nuttall window. + + + + + Flat top window. + + + + + Uniform rectangular (dirichlet) window. + + + + + Triangular window. + + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized string similar to The accuracy couldn't be reached with the specified number of iterations.. + + + + + Looks up a localized string similar to The array arguments must have the same length.. + + + + + Looks up a localized string similar to The given array has the wrong length. Should be {0}.. + + + + + Looks up a localized string similar to The argument must be between 0 and 1.. + + + + + Looks up a localized string similar to Value cannot be in the range -1 < x < 1.. + + + + + Looks up a localized string similar to Value must be even.. + + + + + Looks up a localized string similar to The histogram does not contain the value.. + + + + + Looks up a localized string similar to Value is expected to be between {0} and {1} (including {0} and {1}).. + + + + + Looks up a localized string similar to At least one item of {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be greater than or equal to one.. + + + + + Looks up a localized string similar to Matrix dimensions must agree.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: {0}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}, op3 is {2}.. + + + + + Looks up a localized string similar to The requested matrix does not exist.. 
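For the window-function entries earlier in this hunk (Hamming, Hann, cosine, Lanczos and so on, each in symmetric and periodic variants), a brief sketch; a static Window class returning double[] arrays is an assumption about the 3.16 API:

    using System;
    using MathNet.Numerics;

    class WindowSketch
    {
        static void Main()
        {
            const int width = 8;

            // Symmetric variants, typically used for filter design.
            double[] hamming = Window.Hamming(width);
            double[] hann = Window.Hann(width);

            // Periodic variants, typically used ahead of an FFT.
            double[] hannPeriodic = Window.HannPeriodic(width);

            Console.WriteLine(string.Join(", ", hamming));
            Console.WriteLine(string.Join(", ", hann));
            Console.WriteLine(string.Join(", ", hannPeriodic));
        }
    }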
+ + + + + Looks up a localized string similar to The matrix indices must not be out of range of the given matrix.. + + + + + Looks up a localized string similar to Matrix must not be rank deficient.. + + + + + Looks up a localized string similar to Matrix must not be singular.. + + + + + Looks up a localized string similar to Matrix must be positive definite.. + + + + + Looks up a localized string similar to Matrix column dimensions must agree.. + + + + + Looks up a localized string similar to Matrix row dimensions must agree.. + + + + + Looks up a localized string similar to Matrix must have exactly one column.. + + + + + Looks up a localized string similar to Matrix must have exactly one column and row, thus have only one cell.. + + + + + Looks up a localized string similar to Matrix must have exactly one row.. + + + + + Looks up a localized string similar to Matrix must be square.. + + + + + Looks up a localized string similar to Matrix must be symmetric.. + + + + + Looks up a localized string similar to Matrix must be symmetric positive definite.. + + + + + Looks up a localized string similar to In the specified range, the exclusive maximum must be greater than the inclusive minimum.. + + + + + Looks up a localized string similar to In the specified range, the minimum is greater than maximum.. + + + + + Looks up a localized string similar to Value must be positive.. + + + + + Looks up a localized string similar to Value must neither be infinite nor NaN.. + + + + + Looks up a localized string similar to Value must not be negative (zero is ok).. + + + + + Looks up a localized string similar to {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be odd.. + + + + + Looks up a localized string similar to {0} must be greater than {1}.. + + + + + Looks up a localized string similar to {0} must be greater than or equal to {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than or equal to {1}.. + + + + + Looks up a localized string similar to The chosen parameter set is invalid (probably some value is out of range).. + + + + + Looks up a localized string similar to The given expression does not represent a complex number.. + + + + + Looks up a localized string similar to Value must be positive (and not zero).. + + + + + Looks up a localized string similar to Size must be a Power of Two.. + + + + + Looks up a localized string similar to Size must be a Power of Two in every dimension.. + + + + + Looks up a localized string similar to The range between {0} and {1} must be less than or equal to {2}.. + + + + + Looks up a localized string similar to Arguments must be different objects.. + + + + + Looks up a localized string similar to Array must have exactly one dimension (and not be null).. + + + + + Looks up a localized string similar to Value is too large.. + + + + + Looks up a localized string similar to Value is too large for the current iteration limit.. + + + + + Looks up a localized string similar to Type mismatch.. + + + + + Looks up a localized string similar to The upper bound must be strictly larger than the lower bound.. + + + + + Looks up a localized string similar to The upper bound must be at least as large as the lower bound.. + + + + + Looks up a localized string similar to Array length must be a multiple of {0}.. + + + + + Looks up a localized string similar to All vectors must have the same dimensionality.. 
+ + + + + Looks up a localized string similar to The vector must have 3 dimensions.. + + + + + Looks up a localized string similar to The given array is too small. It must be at least {0} long.. + + + + + Looks up a localized string similar to Big endian files are not supported.. + + + + + Looks up a localized string similar to The supplied collection is empty.. + + + + + Looks up a localized string similar to Complex matrices are not supported.. + + + + + Looks up a localized string similar to An algorithm failed to converge.. + + + + + Looks up a localized string similar to The sample size must be larger than the given degrees of freedom.. + + + + + Looks up a localized string similar to This feature is not implemented yet (but is planned).. + + + + + Looks up a localized string similar to The given file doesn't exist.. + + + + + Looks up a localized string similar to Sample points should be sorted in strictly ascending order. + + + + + Looks up a localized string similar to All sample points should be unique.. + + + + + Looks up a localized string similar to Invalid parameterization for the distribution.. + + + + + Looks up a localized string similar to Invalid Left Boundary Condition.. + + + + + Looks up a localized string similar to The operation could not be performed because the accumulator is empty.. + + + + + Looks up a localized string similar to The operation could not be performed because the histogram is empty.. + + + + + Looks up a localized string similar to Not enough points in the distribution.. + + + + + Looks up a localized string similar to No Samples Provided. Preparation Required.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method, parameter number : {0}. + + + + + Looks up a localized string similar to Invalid Right Boundary Condition.. + + + + + Looks up a localized string similar to Lag must be positive. + + + + + Looks up a localized string similar to Lag must be smaller than the sample size. + + + + + Looks up a localized string similar to ddd MMM dd HH:mm:ss yyyy. + + + + + Looks up a localized string similar to Matrices can not be empty and must have at least one row and column.. + + + + + Looks up a localized string similar to The number of columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Matrix must be in sparse storage format. + + + + + Looks up a localized string similar to The number of rows of a matrix must be positive.. + + + + + Looks up a localized string similar to The number of rows or columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Unable to allocate native memory.. + + + + + Looks up a localized string similar to Only 1 and 2 dimensional arrays are supported.. + + + + + Looks up a localized string similar to Data must contain at least {0} values.. + + + + + Looks up a localized string similar to Name cannot contain a space. name: {0}. + + + + + Looks up a localized string similar to {0} is not a supported type.. + + + + + Looks up a localized string similar to Algorithm experience a numerical break down + . + + + + + Looks up a localized string similar to The two arguments can't be compared (maybe they are part of a partial ordering?). + + + + + Looks up a localized string similar to The integer array does not represent a valid permutation.. 
+ + + + + Looks up a localized string similar to The sampler's proposal distribution is not upper bounding the target density.. + + + + + Looks up a localized string similar to A regression of the requested order requires at least {0} samples. Only {1} samples have been provided. . + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds.. + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds. Consider to use RobustNewtonRaphson instead.. + + + + + Looks up a localized string similar to The lower and upper bounds must bracket a single root.. + + + + + Looks up a localized string similar to The algorithm ended without root in the range.. + + + + + Looks up a localized string similar to The number of rows must greater than or equal to the number of columns.. + + + + + Looks up a localized string similar to All sample vectors must have the same length. However, vectors with disagreeing length {0} and {1} have been provided. A sample with index i is given by the value at index i of each provided vector.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed. The {0}-th diagonal element of the factor U is zero.. + + + + + Looks up a localized string similar to The singular vectors were not computed.. + + + + + Looks up a localized string similar to This special case is not supported yet (but is planned).. + + + + + Looks up a localized string similar to The given stop criterion already exist in the collection.. + + + + + Looks up a localized string similar to There is no stop criterion in the collection.. + + + + + Looks up a localized string similar to String parameter cannot be empty or null.. + + + + + Looks up a localized string similar to We only support sparse matrix with less than int.MaxValue elements.. + + + + + Looks up a localized string similar to The moment of the distribution is undefined.. + + + + + Looks up a localized string similar to A user defined provider has not been specified.. + + + + + Looks up a localized string similar to User work buffers are not supported by this provider.. + + + + + Looks up a localized string similar to Vectors can not be empty and must have at least one element.. + + + + + Looks up a localized string similar to The given work array is too small. Check work[0] for the corret size.. + + +
+
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll new file mode 100644 index 0000000..beeebfb Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll differ diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML new file mode 100644 index 0000000..9c9b21f --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML @@ -0,0 +1,49706 @@ + + + + MathNet.Numerics + + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. + Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. 
Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. + + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. 
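The combinatorics entries above and continuing just below cover both counting and random selection. A sketch of typical calls; Combinatorics as the hosting static class and these exact overloads are assumptions recalled from the 3.16 API:

    using System;
    using MathNet.Numerics;

    class CombinatoricsSketch
    {
        static void Main()
        {
            // Counting: combinations (order ignored), variations (order matters), permutations.
            double combinations = Combinatorics.Combinations(5, 2);   // 10
            double variations = Combinatorics.Variations(5, 2);       // 20
            double permutations = Combinatorics.Permutations(5);      // 120

            // A random permutation of the indices 0..4, Fisher-Yates shuffled.
            int[] shuffled = Combinatorics.GeneratePermutation(5);

            Console.WriteLine(combinations);
            Console.WriteLine(variations);
            Console.WriteLine(permutations);
            Console.WriteLine(string.Join(", ", shuffled));
        }
    }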
+ An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. + + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. 
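The Complex32 documentation above includes a short inline example; expanded here into a compilable form (the float cast of Math.PI is mine, and member names beyond those visible in the doc text are assumptions about the 3.16 API):

    using System;
    using MathNet.Numerics;

    class Complex32Sketch
    {
        static void Main()
        {
            var x = new Complex32(1f, 2f);
            var y = Complex32.FromPolarCoordinates(1f, (float)Math.PI);

            var z = (x + y) / (x - y);

            // Magnitude and phase (argument), as documented above.
            Console.WriteLine(z.Magnitude);
            Console.WriteLine(z.Phase);
        }
    }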
+ + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. + + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. 
+ The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. + + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. + + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. 
+ + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). 
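The parsing and static-helper entries in this part of the hunk could be exercised as below; the accepted string forms are taken from the doc text ('n', 'ni', '(n,n)', ...), and Parse, TryParse, Conjugate and Sqrt as static members of Complex32 are assumptions about the 3.16 API:

    using System;
    using MathNet.Numerics;

    class Complex32ParseSketch
    {
        static void Main()
        {
            // One of the documented formats: '(n,n)'.
            Complex32 parsed = Complex32.Parse("(1,2)");

            Complex32 value;
            bool ok = Complex32.TryParse("3.5", out value);   // real-only form 'n'

            Complex32 conjugate = Complex32.Conjugate(parsed);
            Complex32 root = Complex32.Sqrt(parsed);

            Console.WriteLine(parsed);
            Console.WriteLine(ok);
            Console.WriteLine(value);
            Console.WriteLine(conjugate);
            Console.WriteLine(root);
        }
    }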
+ + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + 64-bit double precision complex numbers class. + + + + The class Complex provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex structures + has two special constant values and + . + + + + Complex x = new Complex(1d, 2d); + Complex y = Complex.FromPolarCoordinates(1d, Math.Pi); + Complex z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. 
+ + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new Complex instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new Complex instance + with real and imaginary numbers positive infinite. + + + + + Returns a new Complex instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex is zero, the Complex + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex + + + + Gets the magnitude (or absolute value) of a complex number. + + The magnitude of the current instance. + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The complex numbers to add. + The double value to add. + + + Subtraction operator. Subtracts double value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The double value to subtract. + + + Addition operator. Adds a complex number and double together. + The result of the addition. + The double value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a double value. + The result of the subtraction. + The double vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. 
+ The result of the multiplication. + The double value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a double value. + The result of the multiplication. + The complex number to multiply. + The double value to multiply. + + + Division operator. Divides a complex number by another. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a double value by a complex number. + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a double value. + The result of the division. + The dividend. + The divisor. + + + + A string representation of this complex number. + + + The string representation of this complex number. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string. + + + A format specification. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format provider. + + + An that supplies culture-specific formatting information. + + + + + A string representation of this complex number. + + + The string representation of this complex number formatted as specified by the + format string and format provider. + + + if the n, is not a number. + + + if s, is . + + + A format specification. + + + An that supplies culture-specific formatting information. + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + A norm of this value. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + The value to compare with. + + + A norm of the difference between this and the other value. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. 
+ Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex. + + The double value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex to a . + + A with the same values as this Complex. + + + + Returns the additive inverse of a specified complex number. + + The result of the and components of the parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. + + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. 
+ + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + A complex number. + The absolute value (or magnitude) of a complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a double-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A double-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. + + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). 
+ + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + The number to perfom this operation on. + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + The number to perfom this operation on. + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + The number to perfom this operation on. + + true if this instance is real nonnegative number; otherwise, false. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. 
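As a hedged sketch of the ComplexExtensions helpers documented above: they extend System.Numerics.Complex once MathNet.Numerics is in scope. Method names are taken from this documentation; exact availability can vary by package version.

    using System;
    using System.Numerics;
    using MathNet.Numerics;   // brings the ComplexExtensions methods into scope

    class ComplexExtensionsDemo
    {
        static void Main()
        {
            var z = new Complex(3.0, 4.0);

            Console.WriteLine(z.MagnitudeSquared());   // 25
            Console.WriteLine(z.SquareRoot());         // principal square root
            Console.WriteLine(z.Square());             // z^2
            Console.WriteLine(z.Exponential());        // exp(z)
            Console.WriteLine(z.NaturalLogarithm());   // ln(z)

            Console.WriteLine(z.IsReal());             // false
            Console.WriteLine(Complex.Zero.IsZero());  // true
        }
    }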
The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + A collection of frequently used mathematical constants. 
+ + + + The number e + + + The number log[2](e) + + + The number log[10](e) + + + The number log[e](2) + + + The number log[e](10) + + + The number log[e](pi) + + + The number log[e](2*pi)/2 + + + The number 1/e + + + The number sqrt(e) + + + The number sqrt(2) + + + The number sqrt(3) + + + The number sqrt(1/2) = 1/sqrt(2) = sqrt(2)/2 + + + The number sqrt(3)/2 + + + The number pi + + + The number pi*2 + + + The number pi/2 + + + The number pi*3/2 + + + The number pi/4 + + + The number sqrt(pi) + + + The number sqrt(2pi) + + + The number sqrt(2*pi*e) + + + The number log(sqrt(2*pi)) + + + The number log(sqrt(2*pi*e)) + + + The number log(2 * sqrt(e / pi)) + + + The number 1/pi + + + The number 2/pi + + + The number 1/sqrt(pi) + + + The number 1/sqrt(2pi) + + + The number 2/sqrt(pi) + + + The number 2 * sqrt(e / pi) + + + The number (pi)/180 - factor to convert from Degree (deg) to Radians (rad). + + + + + The number (pi)/200 - factor to convert from NewGrad (grad) to Radians (rad). + + + + + The number ln(10)/20 - factor to convert from Power Decibel (dB) to Neper (Np). Use this version when the Decibel represent a power gain but the compared values are not powers (e.g. amplitude, current, voltage). + + + The number ln(10)/10 - factor to convert from Neutral Decibel (dB) to Neper (Np). Use this version when either both or neither of the Decibel and the compared values represent powers. + + + The Catalan constant + Sum(k=0 -> inf){ (-1)^k/(2*k + 1)2 } + + + The Euler-Mascheroni constant + lim(n -> inf){ Sum(k=1 -> n) { 1/k - log(n) } } + + + The number (1+sqrt(5))/2, also known as the golden ratio + + + The Glaisher constant + e^(1/12 - Zeta(-1)) + + + The Khinchin constant + prod(k=1 -> inf){1+1/(k*(k+2))^log(k,2)} + + + + The size of a double in bytes. + + + + + The size of an int in bytes. + + + + + The size of a float in bytes. + + + + + The size of a Complex in bytes. + + + + + The size of a Complex in bytes. 
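A quick illustration of the Constants class described above; the field names below (Sqrt2, GoldenRatio, EulerMascheroni, Catalan, Degree) follow this documentation and are believed to match the MathNet.Numerics API, but treat them as an assumption rather than a guarantee.

    using System;
    using MathNet.Numerics;

    class ConstantsDemo
    {
        static void Main()
        {
            Console.WriteLine(Constants.Sqrt2);            // sqrt(2)
            Console.WriteLine(Constants.GoldenRatio);      // (1 + sqrt(5)) / 2
            Console.WriteLine(Constants.EulerMascheroni);
            Console.WriteLine(Constants.Catalan);

            // Degree is the factor pi/180, so 90 degrees expressed in radians:
            double radians = 90.0 * Constants.Degree;
            Console.WriteLine(radians);                    // ~1.5708
        }
    }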
+ + + + Speed of Light in Vacuum: c_0 = 2.99792458e8 [m s^-1] (defined, exact; 2007 CODATA) + + + Magnetic Permeability in Vacuum: mu_0 = 4*Pi * 10^-7 [N A^-2 = kg m A^-2 s^-2] (defined, exact; 2007 CODATA) + + + Electric Permittivity in Vacuum: epsilon_0 = 1/(mu_0*c_0^2) [F m^-1 = A^2 s^4 kg^-1 m^-3] (defined, exact; 2007 CODATA) + + + Characteristic Impedance of Vacuum: Z_0 = mu_0*c_0 [Ohm = m^2 kg s^-3 A^-2] (defined, exact; 2007 CODATA) + + + Newtonian Constant of Gravitation: G = 6.67429e-11 [m^3 kg^-1 s^-2] (2007 CODATA) + + + Planck's constant: h = 6.62606896e-34 [J s = m^2 kg s^-1] (2007 CODATA) + + + Reduced Planck's constant: h_bar = h / (2*Pi) [J s = m^2 kg s^-1] (2007 CODATA) + + + Planck mass: m_p = (h_bar*c_0/G)^(1/2) [kg] (2007 CODATA) + + + Planck temperature: T_p = (h_bar*c_0^5/G)^(1/2)/k [K] (2007 CODATA) + + + Planck length: l_p = h_bar/(m_p*c_0) [m] (2007 CODATA) + + + Planck time: t_p = l_p/c_0 [s] (2007 CODATA) + + + Elementary Electron Charge: e = 1.602176487e-19 [C = A s] (2007 CODATA) + + + Magnetic Flux Quantum: theta_0 = h/(2*e) [Wb = m^2 kg s^-2 A^-1] (2007 CODATA) + + + Conductance Quantum: G_0 = 2*e^2/h [S = m^-2 kg^-1 s^3 A^2] (2007 CODATA) + + + Josephson Constant: K_J = 2*e/h [Hz V^-1] (2007 CODATA) + + + Von Klitzing Constant: R_K = h/e^2 [Ohm = m^2 kg s^-3 A^-2] (2007 CODATA) + + + Bohr Magneton: mu_B = e*h_bar/2*m_e [J T^-1] (2007 CODATA) + + + Nuclear Magneton: mu_N = e*h_bar/2*m_p [J T^-1] (2007 CODATA) + + + Fine Structure Constant: alpha = e^2/4*Pi*e_0*h_bar*c_0 [1] (2007 CODATA) + + + Rydberg Constant: R_infty = alpha^2*m_e*c_0/2*h [m^-1] (2007 CODATA) + + + Bor Radius: a_0 = alpha/4*Pi*R_infty [m] (2007 CODATA) + + + Hartree Energy: E_h = 2*R_infty*h*c_0 [J] (2007 CODATA) + + + Quantum of Circulation: h/2*m_e [m^2 s^-1] (2007 CODATA) + + + Fermi Coupling Constant: G_F/(h_bar*c_0)^3 [GeV^-2] (2007 CODATA) + + + Weak Mixin Angle: sin^2(theta_W) [1] (2007 CODATA) + + + Electron Mass: [kg] (2007 CODATA) + + + Electron Mass Energy Equivalent: [J] (2007 CODATA) + + + Electron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Electron Compton Wavelength: [m] (2007 CODATA) + + + Classical Electron Radius: [m] (2007 CODATA) + + + Tomson Cross Section: [m^2] (2002 CODATA) + + + Electron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Electon G-Factor: [1] (2007 CODATA) + + + Muon Mass: [kg] (2007 CODATA) + + + Muon Mass Energy Equivalent: [J] (2007 CODATA) + + + Muon Molar Mass: [kg mol^-1] (2007 CODATA) + + + Muon Compton Wavelength: [m] (2007 CODATA) + + + Muon Magnetic Moment: [J T^-1] (2007 CODATA) + + + Muon G-Factor: [1] (2007 CODATA) + + + Tau Mass: [kg] (2007 CODATA) + + + Tau Mass Energy Equivalent: [J] (2007 CODATA) + + + Tau Molar Mass: [kg mol^-1] (2007 CODATA) + + + Tau Compton Wavelength: [m] (2007 CODATA) + + + Proton Mass: [kg] (2007 CODATA) + + + Proton Mass Energy Equivalent: [J] (2007 CODATA) + + + Proton Molar Mass: [kg mol^-1] (2007 CODATA) + + + Proton Compton Wavelength: [m] (2007 CODATA) + + + Proton Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton G-Factor: [1] (2007 CODATA) + + + Proton Shielded Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Proton Shielded Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Neutron Mass: [kg] (2007 CODATA) + + + Neutron Mass Energy Equivalent: [J] (2007 CODATA) + + + Neutron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Neuron Compton Wavelength: [m] (2007 CODATA) + + + Neutron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Neutron G-Factor: [1] 
(2007 CODATA) + + + Neutron Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Deuteron Mass: [kg] (2007 CODATA) + + + Deuteron Mass Energy Equivalent: [J] (2007 CODATA) + + + Deuteron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Deuteron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Helion Mass: [kg] (2007 CODATA) + + + Helion Mass Energy Equivalent: [J] (2007 CODATA) + + + Helion Molar Mass: [kg mol^-1] (2007 CODATA) + + + Avogadro constant: [mol^-1] (2010 CODATA) + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 + + + The SI prefix factor corresponding to 1 000 + + + The SI prefix factor corresponding to 100 + + + The SI prefix factor corresponding to 10 + + + The SI prefix factor corresponding to 0.1 + + + The SI prefix factor corresponding to 0.01 + + + The SI prefix factor corresponding to 0.001 + + + The SI prefix factor corresponding to 0.000 001 + + + The SI prefix factor corresponding to 0.000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 000 001 + + + + Sets parameters for the library. + + + + + Use a specific provider if configured, e.g. using + environment variables, or fall back to the best providers. + + + + + Use the best provider available. + + + + + Gets or sets a value indicating whether the distribution classes check validate each parameter. + For the multivariate distributions this could involve an expensive matrix factorization. + The default setting of this property is true. + + + + + Gets or sets a value indicating whether to use thread safe random number generators (RNG). + Thread safe RNG about two and half time slower than non-thread safe RNG. + + + true to use thread safe random number generators ; otherwise, false. + + + + + Optional path to try to load native provider binaries from. + + + + + Gets or sets the linear algebra provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets the fourier transform provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets a value indicating how many parallel worker threads shall be used + when parallelization is applicable. + + Default to the number of processor cores, must be between 1 and 1024 (inclusive). + + + + Gets or sets the TaskScheduler used to schedule the worker tasks. + + + + + Gets or sets the the block size to use for + the native linear algebra provider. + + The block size. Default 512, must be at least 32. + + + + Gets or sets the order of the matrix when linear algebra provider + must calculate multiply in parallel threads. + + The order. Default 64, must be at least 3. + + + + Gets or sets the number of elements a vector or matrix + must contain before we multiply threads. + + Number of elements. Default 300, must be at least 3. + + + + Numerical Derivative. 
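The Control settings documented just above (provider selection, parameter checking, thread-safe RNGs, parallelism) are typically set once at startup. A minimal sketch, assuming the property and method names given in this documentation; the values shown are illustrative, not recommendations.

    using MathNet.Numerics;

    static class NumericsSetup
    {
        public static void Configure()
        {
            // Fall back to the managed provider (no native binaries required).
            Control.UseManaged();

            // Cap the worker threads used where parallelization applies.
            Control.MaxDegreeOfParallelism = 4;

            // Skip per-call parameter validation in the distribution classes
            // (cheaper, but invalid parameters are then not rejected up front).
            Control.CheckDistributionParameters = false;

            // Trade some speed for thread-safe shared random sources.
            Control.ThreadSafeRandomNumberGenerators = true;
        }
    }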
+ + + + + Initialized a NumericalDerivative with the given points and center. + + + + + Initialized a NumericalDerivative with the default points and center for the given order. + + + + + Evaluates the derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + Derivative order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Univariate function handle. + Derivative order. + + + + Evaluates the first derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the first derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the second derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the second derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + + + + Evaluates the partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + + + + Class to calculate finite difference coefficients using Taylor series expansion method. + + + For n points, coefficients are calculated up to the maximum derivative order possible (n-1). + The current function value position specifies the "center" for surrounding coefficients. + Selecting the first, middle or last positions represent forward, backwards and central difference methods. + + + + + + + Number of points for finite difference coefficients. Changing this value recalculates the coefficients table. + + + + + Initializes a new instance of the class. + + Number of finite difference coefficients. + + + + Gets the finite difference coefficients for a specified center and order. + + Current function position with respect to coefficients. 
Must be within point range. + Order of finite difference coefficients. + Vector of finite difference coefficients. + + + + Gets the finite difference coefficients for all orders at a specified center. + + Current function position with respect to coefficients. Must be within point range. + Rectangular array of coefficients, with columns specifing order. + + + + Type of finite different step size. + + + + + The absolute step size value will be used in numerical derivatives, regardless of order or function parameters. + + + + + A base step size value, h, will be scaled according to the function input parameter. A common example is hx = h*(1+abs(x)), however + this may vary depending on implementation. This definition only guarantees that the only scaling will be relative to the + function input parameter and not the order of the finite difference derivative. + + + + + A base step size value, eps (typically machine precision), is scaled according to the finite difference coefficient order + and function input parameter. The initial scaling according to finite different coefficient order can be thought of as producing a + base step size, h, that is equivalent to scaling. This stepsize is then scaled according to the function + input parameter. Although implementation may vary, an example of second order accurate scaling may be (eps)^(1/3)*(1+abs(x)). + + + + + Class to evaluate the numerical derivative of a function using finite difference approximations. + Variable point and center methods can be initialized . + This class can also be used to return function handles (delegates) for a fixed derivative order and variable. + It is possible to evaluate the derivative and partial derivative of univariate and multivariate functions respectively. + + + + + Initializes a NumericalDerivative class with the default 3 point center difference method. + + + + + Initialized a NumericalDerivative class. + + Number of points for finite difference derivatives. + Location of the center with respect to other points. Value ranges from zero to points-1. + + + + Sets and gets the finite difference step size. This value is for each function evaluation if relative stepsize types are used. + If the base step size used in scaling is desired, see . + + + Setting then getting the StepSize may return a different value. This is not unusual since a user-defined step size is converted to a + base-2 representable number to improve finite difference accuracy. + + + + + Sets and gets the base fininte difference step size. This assigned value to this parameter is only used if is set to RelativeX. + However, if the StepType is Relative, it will contain the base step size computed from based on the finite difference order. + + + + + Sets and gets the base finite difference step size. This parameter is only used if is set to Relative. + By default this is set to machine epsilon, from which is computed. + + + + + Sets and gets the location of the center point for the finite difference derivative. + + + + + Number of times a function is evaluated for numerical derivatives. + + + + + Type of step size for computing finite differences. If set to absolute, dx = h. + If set to relative, dx = (1+abs(x))*h^(2/(order+1)). This provides accurate results when + h is approximately equal to the square-root of machine accuracy, epsilon. + + + + + Evaluates the derivative of equidistant points using the finite difference method. + + Vector of points StepSize apart. + Derivative order. + Finite difference step size. 
+ Derivative of points of the specified order. + + + + Evaluates the derivative of a scalar univariate function. + + + Supplying the optional argument currentValue will reduce the number of function evaluations + required to calculate the finite difference derivative. + + Function handle. + Point at which to compute the derivative. + Derivative order. + Current function value at center. + Function derivative at x of the specified order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Input function handle. + Derivative order. + Function handle that evaluates the derivative of input function at a fixed order. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Function partial derivative at x of the specified order. + + + + Evaluates the partial derivatives of a multivariate function array. + + + This function assumes the input vector x is of the correct length for f. + + Multivariate vector function array handle. + Vector at which to evaluate the derivatives. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Vector of functions partial derivatives at x of the specified order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at a fixed order. + + + + Creates a function handle for the partial derivative of a vector multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at fixed order. + + + + Evaluates the mixed partial derivative of variable order for multivariate functions. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function handle. + Points at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivative at x of the specified order. + + + + Evaluates the mixed partial derivative of variable order for multivariate function arrays. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function array handle. + Vector at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivatives at x of the specified order. + + + + Creates a function handle for the mixed partial derivative of a multivariate function. + + Input function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. 
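To make the NumericalDerivative API above concrete, here is a small sketch (namespace MathNet.Numerics.Differentiation; the 5-point/center-2 stencil and the test functions are arbitrary choices, and the optional currentValue arguments are simply omitted).

    using System;
    using MathNet.Numerics.Differentiation;

    class DerivativeDemo
    {
        static void Main()
        {
            Func<double, double> f = x => Math.Sin(x);

            // 5-point stencil centred on the evaluation point (points = 5, center = 2).
            var nd = new NumericalDerivative(5, 2);

            double d1 = nd.EvaluateDerivative(f, Math.PI / 4, 1);   // ~cos(pi/4)
            double d2 = nd.EvaluateDerivative(f, Math.PI / 4, 2);   // ~-sin(pi/4)

            // Partial derivative of g(x, y) = x^2 * y with respect to y at (2, 3).
            Func<double[], double> g = v => v[0] * v[0] * v[1];
            double dgdy = nd.EvaluatePartialDerivative(g, new[] { 2.0, 3.0 }, 1, 1);   // ~4

            Console.WriteLine($"{d1} {d2} {dgdy}");
        }
    }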
+ + + + Creates a function handle for the mixed partial derivative of a multivariate vector function. + + Input vector function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Resets the evaluation counter. + + + + + Class for evaluating the Hessian of a smooth continuously differentiable function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Hessian object with a three point central difference method. + + + + + Creates a numerical Hessian with a specified differentiation scheme. + + Number of points for Hessian evaluation. + Center point for differentiation. + + + + Evaluates the Hessian of the scalar univariate function f at points x. + + Scalar univariate function handle. + Point at which to evaluate Hessian. + Hessian tensor. + + + + Evaluates the Hessian of a multivariate function f at points x. + + + This method of computing the Hessian is only vaid for Lipschitz continuous functions. + The function mirrors the Hessian along the diagonal since d2f/dxdy = d2f/dydx for continuously differentiable functions. + + Multivariate function handle.> + Points at which to evaluate Hessian.> + Hessian tensor. + + + + Resets the function evaluation counter for the Hessian. + + + + + Class for evaluating the Jacobian of a function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Jacobian object with a three point central difference method. + + + + + Creates a numerical Jacobian with a specified differentiation scheme. + + Number of points for Jacobian evaluation. + Center point for differentiation. + + + + Evaluates the Jacobian of scalar univariate function f at point x. + + Scalar univariate function handle. + Point at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x. + + + This function assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x given a current function value. + + + To minimize the number of function evaluations, a user can supply the current value of the function + to be used in computing the Jacobian. This value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Current function value at finite difference center. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function array f at vector x. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Jacobian matrix. + + + + Evaluates the Jacobian of a multivariate function array f at vector x given a vector of current function values. + + + To minimize the number of function evaluations, a user can supply a vector of current values of the functions + to be used in computing the Jacobian. These value must correspond to the "center" location for the + finite differencing. 
If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Vector of current function values. + Jacobian matrix. + + + + Resets the function evaluation counter for the Jacobian. + + + + + Metrics to measure the distance between two structures. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Pearson's distance, i.e. 1 - the person correlation coefficient. + + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Discrete Univariate Bernoulli distribution. + The Bernoulli distribution is a distribution over bits. 
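The NumericalJacobian/NumericalHessian classes and the Distance helpers documented above lend themselves to a combined sketch. Class and method names follow this documentation (MathNet.Numerics.Differentiation and MathNet.Numerics.Distance); the function and vectors are made up for illustration.

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Differentiation;

    class JacobianHessianDistanceDemo
    {
        static void Main()
        {
            // f(x, y) = x^2 + x*y, evaluated at (1, 2).
            Func<double[], double> f = v => v[0] * v[0] + v[0] * v[1];
            double[] x = { 1.0, 2.0 };

            double[] gradient = new NumericalJacobian().Evaluate(f, x);   // ~[4, 1]
            double[,] hessian = new NumericalHessian().Evaluate(f, x);    // ~[[2, 1], [1, 0]]
            Console.WriteLine($"{gradient[0]} {gradient[1]} {hessian[0, 0]}");

            // Distance metrics operate directly on arrays.
            double[] a = { 1.0, 0.0, 2.0 };
            double[] b = { 2.0, 1.0, 0.0 };
            Console.WriteLine(Distance.Euclidean(a, b));   // sqrt(6)
            Console.WriteLine(Distance.Manhattan(a, b));   // 4
            Console.WriteLine(Distance.Chebyshev(a, b));   // 2
            Console.WriteLine(Distance.Cosine(a, b));      // 1 - cos(angle) = 0.6
        }
    }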
The parameter + p specifies the probability that a 1 is generated. + Wikipedia - Bernoulli distribution. + + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + If the Bernoulli parameter is not in the range [0,1]. + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + If the Bernoulli parameter is not in the range [0,1]. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Generates one sample from the Bernoulli distribution. + + The random source to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A random sample from the Bernoulli distribution. + + + + Samples a Bernoulli distributed random variable. + + A sample from the Bernoulli distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The random number generator to use. 
+ The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Continuous Univariate Beta distribution. + For details about this distribution, see + Wikipedia - Beta distribution. + + + There are a few special cases for the parameterization of the Beta distribution. When both + shape parameters are positive infinity, the Beta distribution degenerates to a point distribution + at 0.5. When one of the shape parameters is positive infinity, the distribution degenerates to a point + distribution at the positive infinity. When both shape parameters are 0.0, the Beta distribution + degenerates to a Bernoulli distribution with parameter 0.5. When one shape parameter is 0.0, the + distribution degenerates to a point distribution at the non-zero shape parameter. + + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + A string representation of the Beta distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Gets the α shape parameter of the Beta distribution. Range: α ≥ 0. + + + + + Gets the β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Beta distribution. + + + + + Gets the variance of the Beta distribution. + + + + + Gets the standard deviation of the Beta distribution. + + + + + Gets the entropy of the Beta distribution. + + + + + Gets the skewness of the Beta distribution. + + + + + Gets the mode of the Beta distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the Beta distribution. + + + + + Gets the minimum of the Beta distribution. + + + + + Gets the maximum of the Beta distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . 
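The Bernoulli distribution documented above is the simplest of the distribution classes, so it makes a useful template for how the instance members, the static PMF/CDF helpers and the samplers fit together. A minimal sketch with arbitrary parameters:

    using System;
    using MathNet.Numerics.Distributions;

    class BernoulliDemo
    {
        static void Main()
        {
            var bernoulli = new Bernoulli(0.3);

            Console.WriteLine(bernoulli.Mean);                          // 0.3
            Console.WriteLine(bernoulli.Probability(1));                // P(X = 1) = 0.3
            Console.WriteLine(bernoulli.CumulativeDistribution(0.5));   // P(X <= 0.5) = 0.7

            int draw = bernoulli.Sample();                              // 0 or 1

            // Static shortcuts avoid constructing a distribution object.
            double pmf = Bernoulli.PMF(0.3, 1);
            int quick = Bernoulli.Sample(new Random(42), 0.3);
            Console.WriteLine($"{draw} {pmf} {quick}");
        }
    }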
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Beta distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Beta distribution. + + a sequence of samples from the distribution. + + + + Samples Beta distributed random variables by sampling two Gamma variables and normalizing. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a random number from the Beta distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. 
+ The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + + + + Create a Beta PERT distribution, used in risk analysis and other domains where an expert forecast + is used to construct an underlying beta distribution. + + The minimum value. + The maximum value. + The most likely value (mode). + The random number generator which is used to draw random samples. + The Beta distribution derived from the PERT parameters. + + + + A string representation of the distribution. + + A string representation of the BetaScaled distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the α shape parameter of the BetaScaled distribution. Range: α > 0. + + + + + Gets the β shape parameter of the BetaScaled distribution. Range: β > 0. + + + + + Gets the location (μ) of the BetaScaled distribution. + + + + + Gets the scale (σ) of the BetaScaled distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the BetaScaled distribution. + + + + + Gets the variance of the BetaScaled distribution. + + + + + Gets the standard deviation of the BetaScaled distribution. + + + + + Gets the entropy of the BetaScaled distribution. + + + + + Gets the skewness of the BetaScaled distribution. + + + + + Gets the mode of the BetaScaled distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the BetaScaled distribution. + + + + + Gets the minimum of the BetaScaled distribution. + + + + + Gets the maximum of the BetaScaled distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. 
ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. 
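For the Beta and BetaScaled distributions above, a hedged sketch of typical calls. The InvCDF caveat above applies (it is documented as slow and unreliable), and the PERT argument order follows the min/max/most-likely description given here, so it may need checking against the installed package version.

    using System;
    using MathNet.Numerics.Distributions;

    class BetaDemo
    {
        static void Main()
        {
            var beta = new Beta(2.0, 5.0);

            Console.WriteLine(beta.Mean);                          // 2/7
            Console.WriteLine(beta.Density(0.3));                  // PDF at 0.3
            Console.WriteLine(beta.CumulativeDistribution(0.3));   // CDF at 0.3

            // Static equivalents, including the (slow, see remarks) inverse CDF.
            double median = Beta.InvCDF(2.0, 5.0, 0.5);
            double sample = Beta.Sample(new Random(7), 2.0, 5.0);

            // Beta PERT: an expert estimate (min, max, most likely) mapped onto a scaled Beta.
            var pert = BetaScaled.PERT(1.0, 10.0, 3.0, new Random(7));
            Console.WriteLine($"{median} {sample} {pert.Mean}");
        }
    }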
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Binomial distribution. + For details about this distribution, see + Wikipedia - Binomial distribution. + + + The distribution is parameterized by a probability (between 0.0 and 1.0). + + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + If is not in the interval [0.0,1.0]. + If is negative. + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The random number generator which is used to draw random samples. + If is not in the interval [0.0,1.0]. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + + + + Gets the success probability in each trial. Range: 0 ≤ p ≤ 1. + + + + + Gets the number of trials. Range: n ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . 
+ + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the Binomial distribution without doing parameter checking. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successful trials. + + + + Samples a Binomially distributed random variable. + + The number of successes in N trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Binomially distributed random variables. + + a sequence of successes in N trials. + + + + Samples a binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Samples a binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Discrete Univariate Categorical distribution. + For details about this distribution, see + Wikipedia - Categorical distribution. This + distribution is sometimes called the Discrete distribution. 
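The Binomial members documented above follow the same pattern; a short sketch with illustrative parameters (10 trials at p = 0.25):

    using System;
    using MathNet.Numerics.Distributions;

    class BinomialDemo
    {
        static void Main()
        {
            var binomial = new Binomial(0.25, 10);   // (success probability p, trial count n)

            Console.WriteLine(binomial.Mean);                        // 2.5
            Console.WriteLine(binomial.Probability(3));              // P(X = 3)
            Console.WriteLine(binomial.CumulativeDistribution(3));   // P(X <= 3)

            int successes = binomial.Sample();

            // Static shortcut for the PMF without building an object.
            double pmf = Binomial.PMF(0.25, 10, 3);
            Console.WriteLine($"{successes} {pmf}");
        }
    }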
+ + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + Support: 0..k where k = length(probability mass array)-1 + + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class from a . The distribution + will not be automatically updated when the histogram changes. The categorical distribution will have + one value for each bucket and a probability for that value proportional to the bucket count. + + The histogram from which to create the categorical variable. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Gets the probability mass vector (non-negative ratios) of the multinomial. + + Sometimes the normalized probability vector cannot be represented exactly in a floating point representation. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a . + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets he mode of the distribution. + + Throws a . + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
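As the remarks above note, the Categorical parameter vector is a set of nonnegative ratios that need not sum to one. A brief C# sketch of that behaviour, assuming the 3.x constructor and the IDiscreteDistribution members documented above; the ratio values are illustrative.

using System;
using MathNet.Numerics.Distributions;

class CategoricalSketch
{
    static void Main()
    {
        // Unnormalized ratios are fine: 1:2:7 is treated the same as 0.1, 0.2, 0.7.
        var cat = new Categorical(new[] { 1.0, 2.0, 7.0 });

        // P(X = 2) should come out as 0.7 (the support here is 0..2).
        Console.WriteLine($"P(X = 2) = {cat.Probability(2)}");

        // Draw samples; each sample is an index into the ratio array.
        for (int i = 0; i < 10; i++) Console.Write(cat.Sample() + " ");
    }
}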
+ + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. + + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. 
+ random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. + + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. 
Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. 
+ The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
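The Chi members documented above model the length of a k-dimensional vector of independent standard normals. A short C# sketch, assuming the 3.x Chi class and statics take the degrees of freedom as the first argument; k = 3 is illustrative.

using System;
using MathNet.Numerics.Distributions;

class ChiSketch
{
    static void Main()
    {
        // Chi with k = 3 degrees of freedom: the length of a 3-D standard normal vector.
        var chi = new Chi(3.0);
        Console.WriteLine($"mean = {chi.Mean}, mode = {chi.Mode}");

        // Static density/CDF helpers: degrees of freedom first, then x.
        Console.WriteLine($"pdf(1.0) = {Chi.PDF(3.0, 1.0)}");
        Console.WriteLine($"P(X <= 2) = {Chi.CDF(3.0, 2.0)}");

        // Empirical check: sample a few vector lengths.
        for (int i = 0; i < 5; i++) Console.Write(chi.Sample() + " ");
    }
}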
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. 
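The ChiSquared InvCDF documented above is the usual route to critical values and p-values for χ² tests. A minimal C# sketch assuming the 3.x static signatures (degrees of freedom first); the degrees of freedom and test statistic are illustrative.

using System;
using MathNet.Numerics.Distributions;

class ChiSquaredSketch
{
    static void Main()
    {
        double df = 4.0;

        // 95% critical value for a χ² test with 4 degrees of freedom (≈ 9.488).
        double critical = ChiSquared.InvCDF(df, 0.95);
        Console.WriteLine($"critical value = {critical}");

        // p-value for an observed statistic: P(X > stat) = 1 - CDF(stat).
        double stat = 7.8;
        Console.WriteLine($"p-value = {1.0 - ChiSquared.CDF(df, stat)}");
    }
}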
+ + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . 
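The ContinuousUniform members above cover the instance API and the static helpers parameterized by the lower and upper bounds. A brief C# sketch assuming the 3.x signatures; the interval [2, 5] is illustrative.

using System;
using MathNet.Numerics.Distributions;

class ContinuousUniformSketch
{
    static void Main()
    {
        // Uniform over [2, 5]: mean 3.5, P(X <= 3) = 1/3.
        var u = new ContinuousUniform(2.0, 5.0);
        Console.WriteLine($"mean = {u.Mean}, P(X <= 3) = {u.CumulativeDistribution(3.0)}");

        // Static helpers: lower and upper bounds first, then x or a probability.
        Console.WriteLine($"InvCDF(0.5) = {ContinuousUniform.InvCDF(2.0, 5.0, 0.5)}"); // midpoint 3.5

        // One-off sample without constructing an instance.
        Console.WriteLine(ContinuousUniform.Sample(2.0, 5.0));
    }
}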
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. 
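The Conway-Maxwell-Poisson parameterization above (λ, ν) interpolates between the Geometric (ν = 0), Poisson (ν = 1) and, in the limit, Bernoulli cases. A hedged C# sketch of the ν = 1 case, assuming the 3.x ConwayMaxwellPoisson and Poisson classes expose the IDiscreteDistribution Probability and Sample members; λ = 2 is illustrative.

using System;
using MathNet.Numerics.Distributions;

class ConwayMaxwellPoissonSketch
{
    static void Main()
    {
        // ν = 1 should reproduce a plain Poisson(λ); ν > 1 is under-dispersed, ν < 1 over-dispersed.
        var cmp = new ConwayMaxwellPoisson(2.0, 1.0);
        var poisson = new Poisson(2.0);

        Console.WriteLine($"CMP     P(X = 3) = {cmp.Probability(3)}");
        Console.WriteLine($"Poisson P(X = 3) = {poisson.Probability(3)}");

        // Sampling goes through the cached normalization constant mentioned above.
        for (int i = 0; i < 5; i++) Console.Write(cmp.Sample() + " ");
    }
}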
+ + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. + + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. 
Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. + The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. 
+ + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. 
Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
+ + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. 
+ The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . 
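The Exponential members above are parameterized by the rate λ, with quantiles available through InvCDF. A minimal C# sketch assuming the 3.x static signatures (rate first); λ = 0.5 is illustrative.

using System;
using MathNet.Numerics.Distributions;

class ExponentialSketch
{
    static void Main()
    {
        // Rate λ = 0.5, so the mean waiting time is 1/λ = 2.
        var exp = new Exponential(0.5);
        Console.WriteLine($"mean = {exp.Mean}");

        // P(X <= 2) = 1 - e^(-1) ≈ 0.632, via the instance and static forms.
        Console.WriteLine($"cdf(2) = {exp.CumulativeDistribution(2.0)}");
        Console.WriteLine($"cdf(2) = {Exponential.CDF(0.5, 2.0)}");

        // InvCDF gives quantiles, e.g. the median ln(2)/λ.
        Console.WriteLine($"median = {Exponential.InvCDF(0.5, 0.5)}");
    }
}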
+ + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
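The FisherSnedecor CDF documented above is what an F-test p-value reduces to. A hedged C# sketch assuming the 3.x signatures (d1 and d2 first in the statics); the degrees of freedom and statistic are illustrative.

using System;
using MathNet.Numerics.Distributions;

class FisherSnedecorSketch
{
    static void Main()
    {
        // F-distribution with d1 = 3 and d2 = 20 degrees of freedom.
        var f = new FisherSnedecor(3.0, 20.0);

        // Right-tail p-value for an observed F statistic, as used in an ANOVA-style test.
        double stat = 3.1;
        Console.WriteLine($"p-value = {1.0 - f.CumulativeDistribution(stat)}");

        // Same computation through the static CDF (d1, d2 first, then x).
        Console.WriteLine($"p-value = {1.0 - FisherSnedecor.CDF(3.0, 20.0, stat)}");
    }
}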
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. 
+ The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. 
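Since the Gamma distribution can be specified either by shape/rate or by shape/scale, as the constructor and factory summaries above describe, a short sketch of both parameterizations may help; `WithShapeScale` is assumed to be the library's factory name for the shape/scale form.

```csharp
// Sketch under the assumption that MathNet.Numerics exposes both the
// (shape, rate) constructor and a WithShapeScale(shape, scale) factory.
using System;
using MathNet.Numerics.Distributions;

class GammaParameterizationSketch
{
    static void Main()
    {
        // Shape α = 2 with rate (inverse scale) β = 0.5 ...
        var byRate = new Gamma(2.0, 0.5);

        // ... is the same distribution as shape k = 2 with scale θ = 1/β = 2.
        var byScale = Gamma.WithShapeScale(2.0, 2.0);

        Console.WriteLine($"{byRate.Mean} == {byScale.Mean}"); // both 4
        Console.WriteLine(byRate.Skewness);                    // 2 / sqrt(k) ≈ 1.414
    }
}
```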
+ + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. 
+ + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
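The Gamma sampler above (Marsaglia and Tsang's method) and the Geometric distribution that follows it are both straightforward to exercise; the sketch below assumes the usual MathNet.Numerics member names (`Sample`, `Probability`, `CumulativeDistribution`). Note the documented convention that this Geometric implementation never generates 0, so its support starts at 1.

```csharp
// Sketch only; assumes the conventional MathNet.Numerics.Distributions API.
using System;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.Random;

class GammaAndGeometricSketch
{
    static void Main()
    {
        var rng = new MersenneTwister(1);

        // Static Gamma draw (Marsaglia & Tsang under the hood), α = 3, β = 2.
        Console.WriteLine(Gamma.Sample(rng, 3.0, 2.0));

        // Geometric with success probability p = 0.25; support is {1, 2, 3, ...}.
        var geo = new Geometric(0.25, rng);
        Console.WriteLine(geo.Mean);                      // 1 / p = 4
        Console.WriteLine(geo.Probability(3));            // P(X = 3) = (1 - p)^2 * p
        Console.WriteLine(geo.CumulativeDistribution(3)); // P(X ≤ 3)
    }
}
```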
+ + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). 
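A short Hypergeometric example makes the (N, K, n) parameter naming above concrete; the member names are again the assumed MathNet.Numerics ones.

```csharp
// Sketch only; N = population size, K = successes in it, n = draws without replacement.
using System;
using MathNet.Numerics.Distributions;

class HypergeometricSketch
{
    static void Main()
    {
        var hyper = new Hypergeometric(50, 5, 10);          // N = 50, K = 5, n = 10

        Console.WriteLine(hyper.Mean);                      // n * K / N = 1
        Console.WriteLine(hyper.Probability(1));            // P(exactly one success)
        Console.WriteLine(hyper.CumulativeDistribution(1)); // P(at most one success)
        Console.WriteLine(hyper.Sample());                  // one random success count
    }
}
```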
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). 
+ The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. 
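The interface summaries above (continuous and discrete univariate distributions) are what make the individual distributions interchangeable; the tag-stripped text no longer shows the interface names, so the sketch below assumes MathNet.Numerics's `IContinuousDistribution` and `IDiscreteDistribution`.

```csharp
// Sketch assuming the MathNet.Numerics interface names IContinuousDistribution
// and IDiscreteDistribution for the contracts documented above.
using System;
using MathNet.Numerics.Distributions;

class DistributionInterfaceSketch
{
    static void DescribeContinuous(IContinuousDistribution d)
    {
        // Mode, Density and Sample come from the continuous contract.
        Console.WriteLine($"{d}: mode={d.Mode}, pdf(1)={d.Density(1.0)}, draw={d.Sample()}");
    }

    static void DescribeDiscrete(IDiscreteDistribution d)
    {
        // Mode, Probability and Sample come from the discrete contract.
        Console.WriteLine($"{d}: mode={d.Mode}, pmf(1)={d.Probability(1)}, draw={d.Sample()}");
    }

    static void Main()
    {
        DescribeContinuous(new Normal(0.0, 1.0));
        DescribeContinuous(new InverseGamma(3.0, 2.0)); // α > 0, β > 0
        DescribeDiscrete(new Geometric(0.3));
        DescribeDiscrete(new Hypergeometric(50, 5, 10));
    }
}
```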
+ + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
+ + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. 
+ Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. 
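The Laplace members above come in both instance and static forms of the PDF/CDF; here is a brief sketch of each, assuming the usual MathNet.Numerics names (`Density`, `CumulativeDistribution`, static `PDF` and `Sample`).

```csharp
// Sketch only; location μ = 0, scale b = 2.
using System;
using MathNet.Numerics.Distributions;

class LaplaceSketch
{
    static void Main()
    {
        var laplace = new Laplace(0.0, 2.0);

        Console.WriteLine(laplace.Variance);                   // 2 * b^2 = 8
        Console.WriteLine(laplace.Density(1.0));               // (1 / (2b)) * exp(-|x - μ| / b)
        Console.WriteLine(laplace.CumulativeDistribution(1.0));

        // Static forms, no instance required: (location, scale, x) and (location, scale).
        Console.WriteLine(Laplace.PDF(0.0, 2.0, 1.0));
        Console.WriteLine(Laplace.Sample(0.0, 2.0));
    }
}
```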
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. 
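Because the log-normal summaries above distinguish the (μ, σ) parameters of the logarithm from the mean and variance of the distribution itself, a sketch of the moment-based factory and the maximum-likelihood estimator may be useful; `WithMeanVariance` and `Estimate` are assumed to be the library's factory and estimator names.

```csharp
// Sketch only; assumes LogNormal.WithMeanVariance and LogNormal.Estimate
// as the MathNet.Numerics factory/estimator names.
using System;
using System.Linq;
using MathNet.Numerics.Distributions;

class LogNormalSketch
{
    static void Main()
    {
        // Parameterized by the mean and variance of the distribution itself
        // (not of its logarithm); the factory converts to (μ, σ) internally.
        var byMoments = LogNormal.WithMeanVariance(10.0, 4.0);
        Console.WriteLine($"mu={byMoments.Mu}, sigma={byMoments.Sigma}");

        // Maximum-likelihood fit from data (the MATLAB lognfit analogue noted above).
        double[] data = byMoments.Samples().Take(10000).ToArray();
        var fitted = LogNormal.Estimate(data);
        Console.WriteLine($"fitted mu={fitted.Mu}, sigma={fitted.Sigma}");
    }
}
```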
+ + + + + Gets the maximum of the log-normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the density at . + + MATLAB: lognpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: logncdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: logninv + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. 
Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Multivariate Matrix-valued Normal distributions. The distribution + is parameterized by a mean matrix (M), a covariance matrix for the rows (V) and a covariance matrix + for the columns (K). If the dimension of M is d-by-m then V is d-by-d and K is m-by-m. + Wikipedia - MatrixNormal distribution. + + + + + The mean of the matrix normal distribution. + + + + + The covariance matrix for the rows. + + + + + The covariance matrix for the columns. + + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + The random number generator which is used to draw random samples. + If the dimensions of the mean and two covariance matrices don't match. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + + + + Gets the mean. (M) + + The mean of the distribution. + + + + Gets the row covariance. (V) + + The row covariance. + + + + Gets the column covariance. (K) + + The column covariance. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Evaluates the probability density function for the matrix normal distribution. + + The matrix at which to evaluate the density at. + the density at + If the argument does not have the correct dimensions. + + + + Samples a matrix normal distributed random variable. + + A random number from this distribution. + + + + Samples a matrix normal distributed random variable. + + The random number generator to use. + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + a sequence of samples from the distribution. + + + + Samples a vector normal distributed random variable. + + The random number generator to use. + The mean of the vector normal distribution. + The covariance matrix of the vector normal distribution. + a sequence of samples from defined distribution. + + + + Multivariate Multinomial distribution. For details about this distribution, see + Wikipedia - Multinomial distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + + + Stores the normalized multinomial probabilities. + + + + + The number of trials. + + + + + Initializes a new instance of the Multinomial class. 
+ + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class from histogram . The distribution will + not be automatically updated when the histogram changes. + + Histogram instance + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative returns false, + if the sum of parameters is 0.0, or if the number of trials is negative; otherwise true. + + + + Gets the proportion of ratios. + + + + + Gets the number of trials. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Computes values of the probability mass function. + + Non-negative integers x1, ..., xk + The probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Computes values of the log probability mass function. + + Non-negative integers x1, ..., xk + The log probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Samples one multinomial distributed random variable. + + the counts for each of the different possible values. + + + + Samples a sequence multinomially distributed random variables. + + a sequence of counts for each of the different possible values. + + + + Samples one multinomial distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + the counts for each of the different possible values. + + + + Samples a multinomially distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of variables needed. + a sequence of counts for each of the different possible values. + + + + Discrete Univariate Negative Binomial distribution. + The negative binomial is a distribution over the natural numbers with two parameters r, p. For the special + case that r is an integer one can interpret the distribution as the number of failures before the r'th success + when the probability of success is p. + Wikipedia - NegativeBinomial distribution. + + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. 
Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Gets the number of successes. Range: r ≥ 0. + + + + + Gets the probability of success. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Samples a negative binomial distributed random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + a sample from the distribution. + + + + Samples a NegativeBinomial distributed random variable. + + a sample from the distribution. 
+ + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of NegativeBinomial distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Continuous Univariate Normal distribution, also known as Gaussian distribution. + For details about this distribution, see + Wikipedia - Normal distribution. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a normal distribution from a mean and standard deviation. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + a normal distribution. + + + + Constructs a normal distribution from a mean and variance. + + The mean (μ) of the normal distribution. + The variance (σ^2) of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. 
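The Normal constructors and factories above accept a mean together with a standard deviation, a variance or a precision; the sketch below shows that these describe the same distribution. `WithMeanVariance`, `WithMeanPrecision` and the static `CDF` are assumed to be the library's names.

```csharp
// Sketch only; the three parameterized objects below are all N(5, σ = 2).
using System;
using MathNet.Numerics.Distributions;

class NormalConstructionSketch
{
    static void Main()
    {
        var standard = new Normal();                           // N(0, 1)
        var byStdDev = new Normal(5.0, 2.0);                   // mean 5, std dev 2
        var byVariance = Normal.WithMeanVariance(5.0, 4.0);    // variance = σ^2
        var byPrecision = Normal.WithMeanPrecision(5.0, 0.25); // precision = 1 / σ^2

        Console.WriteLine($"{byStdDev.StdDev} {byVariance.StdDev} {byPrecision.StdDev}"); // all 2
        Console.WriteLine(standard.CumulativeDistribution(0.0)); // 0.5
        Console.WriteLine(Normal.CDF(5.0, 2.0, 7.0));            // static P(X ≤ 7) ≈ 0.8413
    }
}
```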
+ + + + Constructs a normal distribution from a mean and precision. + + The mean (μ) of the normal distribution. + The precision of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Estimates the normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + MATLAB: normfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Gets the mean (μ) of the normal distribution. + + + + + Gets the standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + + Gets the variance of the normal distribution. + + + + + Gets the precision of the normal distribution. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the entropy of the normal distribution. + + + + + Gets the skewness of the normal distribution. + + + + + Gets the mode of the normal distribution. + + + + + Gets the median of the normal distribution. + + + + + Gets the minimum of the normal distribution. + + + + + Gets the maximum of the normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the density at . + + MATLAB: normpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The mean (μ) of the normal distribution. 
+ The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: normcdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: norminv + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + This structure represents the type over which the distribution + is defined. + + + + + The mean value. + + + + + The precision value. + + + + + Initializes a new instance of the struct. + + The mean of the pair. + The precision of the pair. + + + + Gets or sets the mean of the pair. + + + + + Gets or sets the precision of the pair. + + + + + Multivariate Normal-Gamma Distribution. + The distribution is the conjugate prior distribution for the + distribution. It specifies a prior over the mean and precision of the distribution. + It is parameterized by four numbers: the mean location, the mean scale, the precision shape and the + precision inverse scale. + The distribution NG(mu, tau | mloc,mscale,psscale,pinvscale) = Normal(mu | mloc, 1/(mscale*tau)) * Gamma(tau | psscale,pinvscale). + The following degenerate cases are special: when the precision is known, + the precision shape will encode the value of the precision while the precision inverse scale is positive + infinity. When the mean is known, the mean location will encode the value of the mean while the scale + will be positive infinity. A completely degenerate NormalGamma distribution with known mean and precision is possible as well. + Wikipedia - Normal-Gamma distribution. + + + + + Initializes a new instance of the class. 
+ + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Gets the location of the mean. + + + + + Gets the scale of the mean. + + + + + Gets the shape of the precision. + + + + + Gets the inverse scale of the precision. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Returns the marginal distribution for the mean of the NormalGamma distribution. + + the marginal distribution for the mean of the NormalGamma distribution. + + + + Returns the marginal distribution for the precision of the distribution. + + The marginal distribution for the precision of the distribution/ + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the variance of the distribution. + + The mean of the distribution. + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + Density value + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + Density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + The log of the density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + The log of the density value + + + + Generates a sample from the NormalGamma distribution. + + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + a sequence of samples from the distribution. + + + + Generates a sample from the NormalGamma distribution. + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sequence of samples from the distribution. + + + + Continuous Univariate Pareto distribution. + The Pareto distribution is a power law probability distribution that coincides with social, + scientific, geophysical, actuarial, and many other types of observable phenomena. + For details about this distribution, see + Wikipedia - Pareto distribution. + + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + If or are negative. + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The random number generator which is used to draw random samples. 
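For the NormalGamma entries above, a hedged C# sketch. It assumes the constructor takes the four parameters in the order listed above (mean location, mean scale, precision shape, precision inverse scale) and that Sample() returns the MeanPrecisionPair struct documented above; verify against the package before relying on it:

    using System;
    using MathNet.Numerics.Distributions;

    class NormalGammaDemo
    {
        static void Main()
        {
            // NG(mu, tau | mean location, mean scale, precision shape, precision inverse scale).
            var ng = new NormalGamma(0.0, 1.0, 2.0, 2.0);

            // Joint density at a (mean, precision) pair.
            Console.WriteLine(ng.Density(0.5, 1.2));
            Console.WriteLine(ng.DensityLn(0.5, 1.2));

            // Draw one (mean, precision) sample; assumed to be a MeanPrecisionPair.
            var sample = ng.Sample();
            Console.WriteLine($"mu = {sample.Mean}, tau = {sample.Precision}");
        }
    }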
+ If or are negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + + + + Gets the scale (xm) of the distribution. Range: xm > 0. + + + + + Gets the shape (α) of the distribution. Range: α > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Pareto distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. 
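A minimal C# sketch of the Pareto members documented above (instance method names follow the common continuous-distribution interface used throughout this file; treat it as a sketch, not a verified sample):

    using System;
    using MathNet.Numerics.Distributions;

    class ParetoDemo
    {
        static void Main()
        {
            // Pareto(scale xm, shape alpha) with xm > 0 and alpha > 0.
            var pareto = new Pareto(1.0, 3.0);

            Console.WriteLine(pareto.Mean);                               // alpha*xm/(alpha-1) = 1.5
            Console.WriteLine(pareto.Density(2.0));                       // PDF at x = 2
            Console.WriteLine(pareto.CumulativeDistribution(2.0));        // 1 - (xm/x)^alpha = 0.875
            Console.WriteLine(pareto.InverseCumulativeDistribution(0.5)); // median
            Console.WriteLine(pareto.Sample());                           // one random draw
        }
    }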
+ The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Poisson distribution. + + + Distribution is described at Wikipedia - Poisson distribution. + Knuth's method is used to generate Poisson distributed random variables. + f(x) = exp(-λ)*λ^x/x!; + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + If is equal or less then 0.0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + If is equal or less then 0.0. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + + + + Gets the Poisson distribution parameter λ. Range: λ > 0. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. 
+ the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Generates one sample from the Poisson distribution. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by Knuth's method. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by "Rejection method PA". + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson, + Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) + The article is on pages 29-35. The algorithm given here is on page 32. + + + + Samples a Poisson distributed random variable. + + A sample from the Poisson distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Poisson distributed random variables. + + a sequence of successes in N trials. + + + + Samples a Poisson distributed random variable. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Samples a Poisson distributed random variable. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Rayleigh distribution. + The Rayleigh distribution (pronounced /ˈreɪli/) is a continuous probability distribution. 
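Relating to the Poisson entries above, a short C# sketch (Probability and ProbabilityLn are assumed to be the instance PMF methods behind those entries; the static PMF/Sample helpers mirror the documented parameter lists):

    using System;
    using MathNet.Numerics.Distributions;

    class PoissonDemo
    {
        static void Main()
        {
            var poisson = new Poisson(4.5);                          // lambda > 0

            Console.WriteLine(poisson.Probability(3));               // PMF: P(X = 3)
            Console.WriteLine(poisson.ProbabilityLn(3));             // ln PMF
            Console.WriteLine(poisson.CumulativeDistribution(3.0));  // CDF: P(X <= 3)
            Console.WriteLine(poisson.Sample());                     // Knuth / rejection sampling internally

            // Static helpers with an explicit lambda.
            Console.WriteLine(Poisson.PMF(4.5, 3));
            Console.WriteLine(Poisson.Sample(new Random(42), 4.5));
        }
    }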
As an + example of how it arises, the wind speed will have a Rayleigh distribution if the components of + the two-dimensional wind velocity vector are uncorrelated and normally distributed with equal variance. + For details about this distribution, see + Wikipedia - Rayleigh distribution. + + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + If is negative. + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the scale (σ) of the distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Rayleigh distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (σ) of the distribution. Range: σ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. 
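For the Rayleigh entries above, a minimal C# usage sketch (assumed API, mirroring the other continuous distributions in this file):

    using System;
    using MathNet.Numerics.Distributions;

    class RayleighDemo
    {
        static void Main()
        {
            var rayleigh = new Rayleigh(2.0);                               // scale sigma > 0

            Console.WriteLine(rayleigh.Mean);                               // sigma*sqrt(pi/2)
            Console.WriteLine(rayleigh.Density(1.5));                       // PDF
            Console.WriteLine(rayleigh.CumulativeDistribution(1.5));        // 1 - exp(-x^2/(2*sigma^2))
            Console.WriteLine(rayleigh.InverseCumulativeDistribution(0.5)); // median = sigma*sqrt(2*ln 2)
            Console.WriteLine(rayleigh.Sample());
        }
    }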
+ The scale (σ) of the distribution. Range: σ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Stable distribution. + A random variable is said to be stable (or to have a stable distribution) if it has + the property that a linear combination of two independent copies of the variable has + the same distribution, up to location and scale parameters. + For details about this distribution, see + Wikipedia - Stable distribution. + + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Gets the stability (α) of the distribution. Range: 2 ≥ α > 0. + + + + + Gets The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + + + + + Gets the scale (c) of the distribution. Range: c > 0. + + + + + Gets the location (μ) of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets he entropy of the distribution. + + Always throws a not supported exception. + + + + Gets the skewness of the distribution. + + Throws a not supported exception of Alpha != 2. + + + + Gets the mode of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the median of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. 
+ + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + Throws a not supported exception if Alpha != 2, (Alpha != 1 and Beta !=0), or (Alpha != 0.5 and Beta != 1) + + + + Samples the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a random number from the distribution. + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Stable distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. 
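A hedged C# sketch for the Stable entries above. Note that PDF/CDF are only available for the special parameter cases called out above (for example α = 2), while sampling works for any valid parameter set:

    using System;
    using System.Linq;
    using MathNet.Numerics.Distributions;

    class StableDemo
    {
        static void Main()
        {
            // Stable(alpha, beta, scale, location): alpha = 2, beta = 0 reduces to a Normal
            // with variance 2*scale^2, one of the few cases with a closed-form PDF/CDF.
            var stable = new Stable(2.0, 0.0, 1.0, 0.0);
            Console.WriteLine(stable.Density(0.5));
            Console.WriteLine(stable.CumulativeDistribution(0.5));

            // Sampling is available for any valid parameters, e.g. a heavy-tailed alpha = 1.5 case.
            var heavyTailed = new Stable(1.5, 0.0, 1.0, 0.0);
            double[] draws = heavyTailed.Samples().Take(5).ToArray();
            Console.WriteLine(string.Join(", ", draws));
        }
    }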
+ + + + Generates a sample from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Continuous Univariate Student's T-distribution. + Implements the univariate Student t-distribution. For details about this + distribution, see + + Wikipedia - Student's t-distribution. + + We use a slightly generalized version (compared to + Wikipedia) of the Student t-distribution. Namely, one which also + parameterizes the location and scale. See the book "Bayesian Data + Analysis" by Gelman et al. for more details. + The density of the Student t-distribution p(x|mu,scale,dof) = + Gamma((dof+1)/2) (1 + (x - mu)^2 / (scale * scale * dof))^(-(dof+1)/2) / + (Gamma(dof/2)*Sqrt(dof*pi*scale)). + The distribution will use the by + default. Users can get/set the random number generator by using the + property. + The statistics classes will check all the incoming parameters + whether they are in the allowed range. This might involve heavy + computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the StudentT class. This is a Student t-distribution with location 0.0 + scale 1.0 and degrees of freedom 1. + + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Gets the location (μ) of the Student t-distribution. + + + + + Gets the scale (σ) of the Student t-distribution. Range: σ > 0. + + + + + Gets the degrees of freedom (ν) of the Student t-distribution. Range: ν > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Student t-distribution. + + + + + Gets the variance of the Student t-distribution. 
+ + + + + Gets the standard deviation of the Student t-distribution. + + + + + Gets the entropy of the Student t-distribution. + + + + + Gets the skewness of the Student t-distribution. + + + + + Gets the mode of the Student t-distribution. + + + + + Gets the median of the Student t-distribution. + + + + + Gets the minimum of the Student t-distribution. + + + + + Gets the maximum of the Student t-distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Samples student-t distributed random variables. + + The algorithm is method 2 in section 5, chapter 9 + in L. Devroye's "Non-Uniform Random Variate Generation" + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a random number from the standard student-t distribution. + + + + Generates a sample from the Student t-distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Student t-distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the inverse cumulative density at . 
+ + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Student t-distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Student t-distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Triangular distribution. + For details, see Wikipedia - Triangular distribution. + + The distribution will use the by default. + Users can get/set the random number generator by using the property. + The statistics classes will check whether all the incoming parameters are in the allowed range. This might involve heavy computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The random number generator which is used to draw random samples. + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + + + + Gets the lower bound of the distribution. 
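Deferred example for the Student t-distribution entries above, as a minimal C# sketch (the static CDF/InvCDF helpers are assumed to take location, scale and degrees of freedom before x, as with the other distributions in this file):

    using System;
    using MathNet.Numerics.Distributions;

    class StudentTDemo
    {
        static void Main()
        {
            // StudentT(location mu, scale sigma, degrees of freedom nu).
            var t = new StudentT(0.0, 1.0, 5.0);

            Console.WriteLine(t.Density(1.0));
            Console.WriteLine(t.CumulativeDistribution(2.015));         // ~0.95 for nu = 5
            Console.WriteLine(t.InverseCumulativeDistribution(0.975));  // two-sided 95% critical value
            Console.WriteLine(t.Sample());

            // Static helpers with explicit parameters.
            Console.WriteLine(StudentT.CDF(0.0, 1.0, 5.0, 2.015));
            Console.WriteLine(StudentT.InvCDF(0.0, 1.0, 5.0, 0.975));
        }
    }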
+ + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. 
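A minimal C# sketch for the Triangular entries above (the constructor argument order lower, upper, mode is taken from those entries; verify before use):

    using System;
    using MathNet.Numerics.Distributions;

    class TriangularDemo
    {
        static void Main()
        {
            // Triangular(lower, upper, mode) with lower <= mode <= upper.
            var tri = new Triangular(0.0, 10.0, 4.0);

            Console.WriteLine(tri.Mean);                         // (lower + upper + mode) / 3 = 4.666...
            Console.WriteLine(tri.Density(4.0));                 // peak density = 2/(upper - lower) = 0.2
            Console.WriteLine(tri.CumulativeDistribution(4.0));  // (mode - lower)/(upper - lower) = 0.4
            Console.WriteLine(tri.InverseCumulativeDistribution(0.5));
            Console.WriteLine(tri.Sample());
        }
    }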
+ + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. 
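For the Weibull entries above, a short C# sketch; Weibull.Estimate is assumed to be the Qiao/Tsokos parameter-estimation routine mentioned above, and the static helpers are assumed to take shape and scale before x:

    using System;
    using MathNet.Numerics.Distributions;

    class WeibullDemo
    {
        static void Main()
        {
            // Weibull(shape k, scale lambda): k = 1 reduces to an Exponential with rate 1/lambda.
            var weibull = new Weibull(1.5, 2.0);

            Console.WriteLine(weibull.Density(1.0));
            Console.WriteLine(weibull.CumulativeDistribution(2.0));  // 1 - exp(-(x/lambda)^k) = 1 - e^-1
            Console.WriteLine(weibull.Sample());

            // Static CDF and parameter estimation from sample data.
            Console.WriteLine(Weibull.CDF(1.5, 2.0, 2.0));
            Weibull fitted = Weibull.Estimate(new[] { 1.2, 0.8, 2.5, 1.9, 3.1, 0.6 });
            Console.WriteLine($"k = {fitted.Shape}, lambda = {fitted.Scale}");
        }
    }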
+ + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. + + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. 
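A hedged C# sketch for the Wishart entries above; the Matrix&lt;double&gt;.Build.DenseIdentity helper and the constructor signature (degrees of freedom, scale matrix) are assumptions based on these entries and the MathNet.Numerics linear algebra API:

    using System;
    using MathNet.Numerics.Distributions;
    using MathNet.Numerics.LinearAlgebra;

    class WishartDemo
    {
        static void Main()
        {
            // Wishart(degrees of freedom n, scale matrix V); conjugate prior for an MVN precision matrix.
            Matrix<double> scale = Matrix<double>.Build.DenseIdentity(2);
            var wishart = new Wishart(5.0, scale);

            Console.WriteLine(wishart.Mean);          // n * V

            // One sample is a 2x2 positive-definite matrix (Smith & Hocking, AS 53).
            Matrix<double> sample = wishart.Sample();
            Console.WriteLine(sample);
        }
    }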
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. 
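For the Zipf entries above, a minimal C# sketch (the parameter order s, then n, follows the constructor entries above; the instance PMF is assumed to be Probability, as for the other discrete distributions in this file):

    using System;
    using MathNet.Numerics.Distributions;

    class ZipfDemo
    {
        static void Main()
        {
            // Zipf(s, n): power-law PMF with P(X = k) proportional to 1/k^s over k = 1..n.
            var zipf = new Zipf(1.1, 100);

            Console.WriteLine(zipf.Probability(1));               // most likely rank
            Console.WriteLine(zipf.Probability(10));
            Console.WriteLine(zipf.CumulativeDistribution(10.0));
            Console.WriteLine(zipf.Sample());
        }
    }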
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. 
+ Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend to use them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occured calling native provider function. + + + + + An error occured calling native provider function. + + + + + Native provider was unable to allocate sufficent memory. + + + + + Native provider failed LU inversion do to a singular U matrix. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return 0) + and then dividing the total by the number of gain periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. (The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). 
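The integer number theory entries above appear to map to a static helper class (assumed here to be MathNet.Numerics.Euclid; the section itself does not name it). A small C# sketch of the modulus/remainder distinction and the gcd/lcm helpers, matching the worked gcd(45,18) example above:

    using System;
    using MathNet.Numerics;

    class EuclidDemo
    {
        static void Main()
        {
            // Canonical modulus (sign of the divisor) vs. remainder (sign of the dividend).
            Console.WriteLine(Euclid.Modulus(-5, 3));    //  1
            Console.WriteLine(Euclid.Remainder(-5, 3));  // -2

            Console.WriteLine(Euclid.IsPowerOfTwo(64));          // True
            Console.WriteLine(Euclid.CeilingToPowerOfTwo(100));  // 128

            // gcd / lcm, plus the extended gcd: a*x + b*y = gcd(a, b).
            Console.WriteLine(Euclid.GreatestCommonDivisor(45, 18));   // 9
            Console.WriteLine(Euclid.LeastCommonMultiple(45, 18));     // 90
            long x, y;
            long d = Euclid.ExtendedGreatestCommonDivisor(45, 18, out x, out y);
            Console.WriteLine($"{d} = 45*{x} + 18*{y}");               // 9 = 45*1 + 18*(-2)
        }
    }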
+ + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. 
+ A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. 
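Editor's note: the root-finding and least-squares fitting routines described above are driven from the static FindRoots and Fit classes. A hedged sketch, assuming the 3.x signatures (Fit.Line returning an (intercept, slope) pair and Fit.Polynomial returning coefficients ascending by exponent, compatible with Evaluate.Polynomial as stated above):

    using System;
    using MathNet.Numerics;

    class FitDemo
    {
        static void Main()
        {
            // Bracketed root of f(x) = x^2 - 2 on [1, 2].
            double root = FindRoots.OfFunction(x => x * x - 2.0, 1.0, 2.0);
            Console.WriteLine(root); // ~1.41421

            // Least-squares line fit y = a + b*x.
            double[] xs = { 0, 1, 2, 3, 4 };
            double[] ys = { 1.1, 2.9, 5.2, 7.0, 8.8 };
            var line = Fit.Line(xs, ys);
            Console.WriteLine($"a={line.Item1}, b={line.Item2}");

            // Second-order polynomial fit; needs at least (k+1)=3 samples.
            double[] p = Fit.Polynomial(xs, ys, 2);
            Console.WriteLine(Evaluate.Polynomial(1.5, p));
        }
    }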
+ + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. 
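Editor's note: the linspace/logspace/colon-style generators above live on the static Generate class, with the length as the first argument rather than the last. A small sketch under that assumption:

    using System;
    using MathNet.Numerics;

    class GenerateDemo
    {
        static void Main()
        {
            // MATLAB-style linspace/logspace, length first.
            double[] lin = Generate.LinearSpaced(5, 0.0, 1.0);    // 0, 0.25, 0.5, 0.75, 1
            double[] log = Generate.LogSpaced(4, 0.0, 3.0);       // 1, 10, 100, 1000

            // Colon-operator style range: start is always included, stop only if it lines up with the step.
            double[] range = Generate.LinearRange(0.0, 0.5, 2.0); // 0, 0.5, 1, 1.5, 2

            // Sample a function at linearly spaced points.
            double[] sq = Generate.LinearSpacedMap(5, 0.0, 2.0, x => x * x);

            Console.WriteLine(string.Join(", ", lin));
            Console.WriteLine(string.Join(", ", range));
            Console.WriteLine(string.Join(", ", sq));
        }
    }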
+ + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. 
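Editor's note: a sketch of the periodic-wave generators described above. The exact overloads are an assumption (Generate.Sinusoidal taking length, sampling rate, frequency and amplitude; Generate.Square taking the high/low phase lengths and the two emitted values); treat the parameter order as illustrative rather than definitive:

    using System;
    using MathNet.Numerics;

    class WaveDemo
    {
        static void Main()
        {
            // 1 kHz sine sampled at 32 kHz with amplitude 2.0;
            // the sampling rate must exceed twice the frequency (Nyquist).
            double[] sine = Generate.Sinusoidal(64, 32000, 1000.0, 2.0);

            // Square wave: 3 samples high, 5 samples low, emitting 1.0/0.0, starting with the high phase.
            double[] square = Generate.Square(16, 3, 5, 0.0, 1.0);

            Console.WriteLine(string.Join(", ", sine));
            Console.WriteLine(string.Join(", ", square));
        }
    }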
+ + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. 
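Editor's note: the random-sample generators above can be driven either by the fast built-in uniform/normal helpers or by an explicit distribution object from MathNet.Numerics.Distributions. A hedged sketch, assuming the 3.x Generate overloads:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Distributions;

    class RandomSamplesDemo
    {
        static void Main()
        {
            // Fast uniform [0,1) samples (reduced randomness guarantees, as noted above).
            double[] u = Generate.Uniform(5);

            // White Gaussian samples with the given mean and standard deviation.
            double[] g = Generate.Normal(5, 0.0, 1.0);

            // Samples drawn from an arbitrary continuous distribution object.
            double[] w = Generate.Random(5, new Gamma(2.0, 1.5));

            Console.WriteLine(string.Join(", ", u));
            Console.WriteLine(string.Join(", ", g));
            Console.WriteLine(string.Join(", ", w));
        }
    }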
+ + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + The parsed double number using the current culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + The parsed float number using the current culture information. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. 
+ + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. 
+ Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). [= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). 
+ + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Approximation of the finite integral in the given interval. + + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. 
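Editor's note: the FFT routines documented above sit on the static Fourier class in MathNet.Numerics.IntegralTransforms and operate in place on a complex sample vector; the convention options control exponent sign and scaling. A round-trip sketch, assuming the Matlab convention (scale by 1/N on the inverse only):

    using System;
    using System.Numerics;
    using MathNet.Numerics.IntegralTransforms;

    class FourierDemo
    {
        static void Main()
        {
            // Arbitrary-length complex time samples; the transform is evaluated in place.
            var samples = new Complex[8];
            for (int i = 0; i < samples.Length; i++)
                samples[i] = new Complex(Math.Sin(2 * Math.PI * i / samples.Length), 0.0);

            Fourier.Forward(samples, FourierOptions.Matlab);   // forward FFT
            Fourier.Inverse(samples, FourierOptions.Matlab);   // inverse FFT restores the input

            foreach (var c in samples)
                Console.WriteLine(c);
        }
    }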
+ + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. + + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. 
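Editor's note: the quadrature routines above (double-exponential transformation, Gauss-Legendre, Newton-Cotes) are available both through the Integrate facade and the rule classes in MathNet.Numerics.Integration. A sketch under that assumption; all three calls should agree on a smooth integrand such as exp(-x^2) over [0,1] (~0.746824):

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Integration;

    class IntegrationDemo
    {
        static void Main()
        {
            Func<double, double> f = x => Math.Exp(-x * x);

            // Double-exponential transformation on a closed interval, to a target accuracy.
            double a = Integrate.OnClosedInterval(f, 0.0, 1.0, 1e-10);

            // Nth order Gauss-Legendre rule (precomputed abscissas/weights for common orders).
            double b = GaussLegendreRule.Integrate(f, 0.0, 1.0, 32);

            // Composite Simpson's rule with an even number of partitions.
            double c = SimpsonRule.IntegrateComposite(f, 0.0, 1.0, 64);

            Console.WriteLine($"{a} {b} {c}");
        }
    }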
+ + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. 
+ First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. + Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. 
+ + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. 
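Editor's note: the Interpolate factory methods above accept arbitrary (x, y) samples and return an interpolation scheme that can then be evaluated, differentiated and integrated where supported. A hedged sketch, assuming the Interpolate.Linear and Interpolate.CubicSpline factory names and the IInterpolation members named in the summaries below:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Interpolation;

    class InterpolationDemo
    {
        static void Main()
        {
            double[] t = { 0.0, 1.0, 2.0, 3.0 };
            double[] x = { 0.0, 1.0, 4.0, 9.0 };

            // Factory methods accept unsorted points; the *Sorted creators on the concrete classes are cheaper.
            IInterpolation linear = Interpolate.Linear(t, x);
            IInterpolation spline = Interpolate.CubicSpline(t, x);

            Console.WriteLine(linear.Interpolate(1.5));     // piecewise linear value
            Console.WriteLine(spline.Interpolate(1.5));     // natural cubic spline value
            Console.WriteLine(spline.Differentiate(1.5));   // interpolated first derivative
            Console.WriteLine(spline.Integrate(0.0, 3.0));  // definite integral over [0,3]
        }
    }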
+ + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. 
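Editor's note: the two rational schemes above (pole-free Floater-Hormann and Bulirsch-Stoer with poles) support evaluation only. The sketch below assumes the factory names Interpolate.RationalWithoutPoles and Interpolate.RationalWithPoles; the RationalWithoutPoles name appears in the factory remarks earlier, the second is inferred by analogy and should be verified against the installed version:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.Interpolation;

    class RationalInterpolationDemo
    {
        static void Main()
        {
            double[] t = { 0.0, 1.0, 3.0, 4.0, 7.0 };
            double[] x = { 1.0, 2.0, 0.5, 3.0, 2.5 };

            // Floater-Hormann barycentric rational interpolation (pole-free).
            IInterpolation fh = Interpolate.RationalWithoutPoles(t, x);

            // Bulirsch-Stoer rational interpolation (may have poles inside the interval).
            IInterpolation bs = Interpolate.RationalWithPoles(t, x);

            // Both support evaluation only; differentiation and integration are not supported.
            Console.WriteLine(fh.Interpolate(2.0));
            Console.WriteLine(bs.Interpolate(2.0));
            Console.WriteLine(fh.SupportsDifferentiation);  // False
        }
    }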
+ + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. 
+ Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. 
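Editor's note: when the sample arrays are already sorted ascendingly by x, the *Sorted creators named in the summaries above build the splines directly. A sketch using LinearSpline.InterpolateSorted and the natural/Akima cubic creators (Akima needs at least five points):

    using System;
    using MathNet.Numerics.Interpolation;

    class SplineDemo
    {
        static void Main()
        {
            double[] t = { 0.0, 1.0, 2.0, 3.0, 4.0 };
            double[] x = { 0.0, 0.8, 0.9, 0.1, -0.8 };

            LinearSpline line = LinearSpline.InterpolateSorted(t, x);
            CubicSpline natural = CubicSpline.InterpolateNaturalSorted(t, x);
            CubicSpline akima = CubicSpline.InterpolateAkimaSorted(t, x);  // robust to outliers

            Console.WriteLine(line.Interpolate(2.5));
            Console.WriteLine(natural.Differentiate(2.5));  // splines support differentiation
            Console.WriteLine(akima.Integrate(0.0, 4.0));   // ...and integration
        }
    }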
+ + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. + + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). 
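Editor's note: the Neville scheme above fits the full Lagrange polynomial through all points; it supports differentiation but not integration, and the barycentric equidistant variant is preferred when the points are equally spaced. A sketch using the InterpolateSorted creator named in the summaries:

    using System;
    using MathNet.Numerics.Interpolation;

    class NevilleDemo
    {
        static void Main()
        {
            double[] t = { 0.0, 1.0, 2.0, 3.0 };
            double[] x = { 1.0, 3.0, 9.0, 31.0 };

            // Lagrange polynomial through all points, evaluated with Neville's algorithm.
            var neville = NevillePolynomialInterpolation.InterpolateSorted(t, x);

            Console.WriteLine(neville.Interpolate(1.5));
            Console.WriteLine(neville.Differentiate(1.5));   // supported
            Console.WriteLine(neville.SupportsIntegration);  // False: integration is not supported
        }
    }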
+ [Continues QuadraticSpline (first and second derivatives, indefinite and definite integrals, segment index lookup), the SplineBoundaryCondition options (Natural / zero second derivative, ParabolicallyTerminated, fixed first derivative, fixed second derivative), StepInterpolation (a left-closed step function: segment i is [x_i, x_{i+1}) and the last segment is open-ended, with y = 0 to the left of the first sample; supports both differentiation and integration), and a transformed-interpolation wrapper that applies a transformation to the interpolated values and supports neither differentiation nor integration.]
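The step function described above carries the value at the left endpoint of each segment. A sketch of how it behaves; the InterpolateSorted factory name is again assumed from the library's usual pattern rather than read out of this file:

    using MathNet.Numerics.Interpolation;

    class StepDemo
    {
        static void Main()
        {
            double[] x = { 0.0, 1.0, 2.0 };
            double[] y = { 10.0, 20.0, 30.0 };

            IInterpolation step = StepInterpolation.InterpolateSorted(x, y);

            double a = step.Interpolate(0.5);       // 10: inside [0, 1)
            double b = step.Interpolate(1.0);       // 20: segment starts are included
            double area = step.Integrate(0.0, 2.0); // 10*1 + 20*1 = 30
        }
    }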
+ [DenseMatrix: dense column-major storage with cached row/column counts, direct access to the backing array, and a large family of factory methods — from an existing storage instance, square or rectangular zero matrices, direct binding to a raw column-major array, and copies of matrices, 2-D arrays, indexed enumerables, column-major enumerables, column/row enumerables, column/row arrays, column/row vectors, diagonal vectors or arrays, constant or init-function fills, identity matrices, and random-distribution samples. Also documents the induced L1 norm (maximum absolute column sum), induced infinity norm (maximum absolute row sum), Frobenius norm, negation, and scalar addition.]
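To make the storage and factory descriptions above concrete, a short sketch using the Math.NET 3.x builder API (Matrix&lt;double&gt;.Build) alongside the DenseMatrix factories; the numeric comments are worked by hand:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class DenseMatrixDemo
    {
        static void Main()
        {
            var a = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });
            var zeros = Matrix<double>.Build.Dense(3, 3);        // 3x3, initialized to zero
            var eye   = Matrix<double>.Build.DenseIdentity(3);   // identity

            // Column-major backing storage: { 1, 3, 2, 4 } (column by column).
            double[] data = a.ToColumnMajorArray();

            double l1   = a.L1Norm();        // 6 = max absolute column sum
            double linf = a.InfinityNorm();  // 7 = max absolute row sum
            double fro  = a.FrobeniusNorm(); // sqrt(1 + 4 + 9 + 16) ≈ 5.477
        }
    }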
+ [DenseMatrix arithmetic: add or subtract a scalar or another matrix, multiply by a scalar, vector or matrix, transpose-and-multiply variants, scalar division, pointwise multiply/divide/power, canonical modulus and remainder in both operand orders, trace, and the +, -, unary -, and * operator overloads (matrix-matrix, matrix-vector, vector-matrix, scalar on either side), plus a symmetry check. Then begins DenseVector: element count, backing data, and constructors from a storage instance, a length, a raw array (bound directly without copying), or copies of other vectors, arrays and (indexed) enumerables.]
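Before the vector members continue, a brief sketch of the matrix arithmetic and operators listed above; note that the raw-array DenseVector constructor binds to the array without copying, exactly as documented:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class MatrixArithmeticDemo
    {
        static void Main()
        {
            var a = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });
            var b = DenseMatrix.OfArray(new double[,] { { 0, 1 }, { 1, 0 } });
            var v = new DenseVector(new[] { 1.0, 1.0 });  // binds directly to the array

            var sum  = a + b;                          // element-wise addition
            var prod = a * b;                          // matrix product
            Vector<double> av = a * v;                 // matrix-vector product: { 3, 7 }
            var atb  = a.TransposeThisAndMultiply(b);  // A^T * B without forming A^T

            double trace   = a.Trace();        // 1 + 4 = 5
            bool symmetric = a.IsSymmetric();  // false
        }
    }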
+ [DenseVector continued: constant, init-function and random-distribution creation, access to the internal array (by reference or by direct binding), scalar and vector addition and subtraction, negation, scalar multiplication and division, dot product (also exposed as the * operator between two vectors), canonical modulus and remainder, index of the (absolute) minimum and maximum elements, element sum, L1/L2/infinity and general p-norms, pointwise division and power, and Parse/TryParse from strings of the form 'n', 'n,n,..', '(n,n,..)' or '[n,n,...]'. Then begins DiagonalMatrix: possibly non-square, diagonal anchored at (0,0), and setting a non-zero, non-NaN off-diagonal entry throws.]
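A corresponding sketch for the vector operations and norms documented above; the values in the comments are computed by hand:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class DenseVectorDemo
    {
        static void Main()
        {
            var v = new DenseVector(new[] { 3.0, -4.0, 12.0 });
            var w = new DenseVector(new[] { 1.0, 2.0, 2.0 });

            double dot  = v.DotProduct(w);  // 3 - 8 + 24 = 19
            double dot2 = v * w;            // same dot product via the * operator

            double l1   = v.L1Norm();        // 19 (Manhattan norm)
            double l2   = v.L2Norm();        // 13 (Euclidean norm)
            double linf = v.InfinityNorm();  // 12 (maximum absolute value)
            double sum  = v.Sum();           // 11
            int    imax = v.AbsoluteMaximumIndex(); // 2

            Vector<double> q = v.PointwiseDivide(w); // { 3, -2, 6 }
        }
    }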
+ [DiagonalMatrix continued: factories mirroring DenseMatrix (square and rectangular zero matrices, constant diagonal, direct binding to a diagonal array, copies of diagonal matrices, 2-D arrays and (indexed) enumerables, init-function fills, identity, random diagonals), arithmetic specialised to the diagonal case (negate, add, subtract, scalar/vector/matrix multiplication including the transpose variants, scalar division in both orders), determinant, getting and setting the diagonal, induced L1/L2/infinity and Frobenius norms, condition number, inverse, and lower/upper (strictly) triangular extraction.]
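Diagonal storage makes determinants, inverses and norms trivial. A sketch using the generic builder; DiagonalOfDiagonalArray is the 3.x builder name as I recall it, so verify it against the referenced package version:

    using MathNet.Numerics.LinearAlgebra;

    class DiagonalMatrixDemo
    {
        static void Main()
        {
            var d = Matrix<double>.Build.DiagonalOfDiagonalArray(new[] { 2.0, 3.0, 4.0 });

            double det = d.Determinant();        // 2 * 3 * 4 = 24
            var inv    = d.Inverse();            // diagonal 1/2, 1/3, 1/4
            Vector<double> diag = d.Diagonal();  // { 2, 3, 4 }
            double l1  = d.L1Norm();             // 4 = max absolute column sum

            // Writing a non-zero value off the diagonal throws, as documented above;
            // assigning 0.0 (or NaN) there leaves the matrix unchanged.
            // d[0, 1] = 1.0;  // would throw
        }
    }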
+ [DiagonalMatrix wrap-up: sub-matrix extraction, column and row permutation (always throws, since permuting a diagonal matrix is meaningless), symmetry check, and modulus/remainder. Then the factorization classes begin: the abstract Cholesky (A = L*L' for a symmetric positive definite A, computed at construction, exposing the determinant and log-determinant), DenseCholesky with Solve for matrix and vector right-hand sides, DenseEvd (eigenvalue decomposition A = V*D*V' for symmetric A, block-diagonal D with 2x2 blocks for complex conjugate pairs otherwise, built on the EISPACK-derived tred2/tql2/orthes/hqr2 routines plus complex scalar division, with Solve), and the start of the modified Gram-Schmidt QR factorization.]
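The Cholesky factorization documented above is the cheapest way to solve A x = b when A is symmetric positive definite; a minimal sketch (the factorization is computed inside the A.Cholesky() call and cached on the returned object):

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class CholeskyDemo
    {
        static void Main()
        {
            // Symmetric positive definite.
            var a = DenseMatrix.OfArray(new double[,]
            {
                { 4, 2, 0 },
                { 2, 5, 1 },
                { 0, 1, 3 }
            });
            var b = new DenseVector(new[] { 1.0, 2.0, 3.0 });

            var chol = a.Cholesky();            // throws if A is not symmetric positive definite
            Vector<double> x = chol.Solve(b);   // solves A x = b using A = L * L'
            Matrix<double> l = chol.Factor;     // the lower-triangular factor L
            double det = chol.Determinant;      // determinant of A, recovered from L
        }
    }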
+ [Dense factorizations continued: QR by modified Gram-Schmidt (Factorize and Solve; requires at least as many rows as columns and full rank), DenseLU (A = L*U with Solve and an LU-based Inverse), DenseQR by Householder transformation (Tau vector, full or thin factorization, Solve), and DenseSvd (M = U*Σ*V^T with the singular values ordered descending, Solve). Also the shared base-class members: Evd (determinant, rank, full-rank flag, and the encoding of complex-conjugate eigenvector pairs in the columns of V), GramSchmidt and QR (absolute determinant, full-rank flag), LU with pivoting (P*A = L*U, determinant), and Svd (rank, 2-norm, condition number max(S)/min(S), determinant), followed by UserCholesky and the start of UserEvd for non-dense matrix types.]
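For general (non-SPD) systems the LU, QR and SVD factorizations above are the workhorses; a sketch of how they are typically driven, with the SVD also exposing the rank and condition number documented here:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class FactorizationDemo
    {
        static void Main()
        {
            var a = DenseMatrix.OfArray(new double[,] { { 3, 1 }, { 1, 2 } });
            var b = new DenseVector(new[] { 9.0, 8.0 });

            Vector<double> xLu = a.LU().Solve(b);   // P*A = L*U with pivoting; x = { 2, 3 }
            Vector<double> xQr = a.QR().Solve(b);   // Householder QR, same solution

            var svd = a.Svd(true);                  // true: also compute U and VT
            Vector<double> xSvd = svd.Solve(b);
            Vector<double> s    = svd.S;            // singular values, descending
            double cond = svd.ConditionNumber;      // max(S) / min(S)
            int    rank = svd.Rank;                 // number of non-negligible singular values
        }
    }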
+ [Managed ('User') factorizations for general matrix storage: UserEvd with the same EISPACK-derived tridiagonalization, QL, Hessenberg and real-Schur routines plus complex division and Solve; UserGramSchmidt; UserLU with Solve and an LU-based Inverse; UserQR by Householder transformation (column generation and Q/R computation helpers, Solve); and UserSvd with its LAPACK-style helpers (signed absolute value, column swap and scaling, Givens rotation generation à la DROTG and plane rotation application, column and vector 2-norms, dot products) and Solve. Then begins the double-precision Matrix specialization: CoerceZero, conjugate transpose, conjugation, negation, and scalar/matrix addition and subtraction.]
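The eigenvalue decomposition documented in this block returns complex eigenvalues in general and an orthogonal V (with A = V*D*V') in the symmetric case; a short sketch:

    using System.Numerics;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    class EvdDemo
    {
        static void Main()
        {
            var a = DenseMatrix.OfArray(new double[,] { { 2, 1 }, { 1, 2 } }); // symmetric

            var evd = a.Evd();
            Vector<Complex> eigenvalues = evd.EigenValues;  // 1 and 3 (real, since A is symmetric)
            Matrix<double>  v = evd.EigenVectors;           // orthogonal for symmetric A
            Matrix<double>  d = evd.D;                      // (block-)diagonal eigenvalue matrix

            // For symmetric A the decomposition reconstructs the matrix: A ≈ V * D * V^T.
            var reconstructed = v * d * v.Transpose();
        }
    }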
+ If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. 
+ The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
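+ A minimal sketch of such use, assuming the 3.x solver types (BiCgStab, Iterator&lt;double&gt;, the stop criteria and DiagonalPreconditioner shown below) and an illustrative tridiagonal system; the documented Solve(matrix, input, result, iterator, preconditioner) signature is used directly:
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,]
+ {
+     {  4, -1,  0 },
+     { -1,  4, -1 },
+     {  0, -1,  4 }
+ });
+ var b = DenseVector.OfArray(new double[] { 1, 2, 3 });
+ var x = DenseVector.Create(3, i => 0.0);       // result vector, filled in place
+
+ // Stop after 1000 iterations or once the residual falls below 1e-10.
+ var monitor = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ new BiCgStab().Solve(A, b, x, monitor, new DiagonalPreconditioner());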
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris<br/>
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
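+ An indicative sketch along the same lines as the BiCgStab one above; GpBiCg is assumed to be the 3.x class name, it is driven through the same documented Solve signature, and the BiCGStab/GPBiCG switching-step properties are left at their defaults:
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,] { { 4, -1 }, { -1, 4 } });
+ var b = DenseVector.OfArray(new double[] { 1, 1 });
+ var x = DenseVector.Create(2, i => 0.0);
+
+ var monitor = new Iterator<double>(
+     new IterationCountStopCriterion<double>(1000),
+     new ResidualStopCriterion<double>(1e-10));
+
+ new GpBiCg().Solve(A, b, x, monitor, new DiagonalPreconditioner());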
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
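+ A sketch of how the preconditioner surface described above (Initialize followed by Approximate) is typically driven; the class name ILU0Preconditioner is an assumption, and the system values are illustrative:
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,]
+ {
+     {  4, -1,  0 },
+     { -1,  4, -1 },
+     {  0, -1,  4 }
+ });
+
+ var ilu0 = new ILU0Preconditioner();   // assumed class name for the ILU(0) preconditioner
+ ilu0.Initialize(A);                    // factors A into the combined L/U storage
+
+ var rhs = DenseVector.OfArray(new double[] { 1, 2, 3 });
+ var lhs = DenseVector.Create(3, i => 0.0);
+ ilu0.Approximate(rhs, lhs);            // lhs now holds the approximate solution of A*lhs = rhs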
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
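+ A sketch of the documented three-argument constructor (fill level, drop tolerance, pivot tolerance); the class name ILUTPPreconditioner is an assumption and the arguments are passed positionally in the documented order:
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+
+ // Arguments follow the documented settings, in order:
+ // allowed fill (relative to the original non-zero count),
+ // absolute drop tolerance, and pivot tolerance (0.0 disables pivoting).
+ var ilutp = new ILUTPPreconditioner(10.0, 1e-4, 0.5);   // assumed class name
+
+ var A = SparseMatrix.OfArray(new double[,] { { 4, -1 }, { -1, 4 } });
+ ilutp.Initialize(A);                                    // decompose with dropping and pivoting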
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
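+ An indicative sketch in the same pattern as the other solvers, assuming MlkBiCgStab is the 3.x class name; the number of Lanczos starting vectors is left at its default:
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,]
+ {
+     {  5, -2,  0 },
+     { -2,  5, -2 },
+     {  0, -2,  5 }
+ });
+ var b = DenseVector.OfArray(new double[] { 1, 0, 1 });
+ var x = DenseVector.Create(3, i => 0.0);
+
+ var monitor = new Iterator<double>(
+     new IterationCountStopCriterion<double>(500),
+     new ResidualStopCriterion<double>(1e-8));
+
+ new MlkBiCgStab().Solve(A, b, x, monitor, new DiagonalPreconditioner());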
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
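+ An indicative sketch, assuming TFQMR is the 3.x class name and reusing the same documented Solve signature; the tridiagonal system is illustrative:
+ using MathNet.Numerics.LinearAlgebra.Double;
+ using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+ using MathNet.Numerics.LinearAlgebra.Solvers;
+
+ var A = SparseMatrix.OfArray(new double[,]
+ {
+     {  3, -1,  0 },
+     { -1,  3, -1 },
+     {  0, -1,  3 }
+ });
+ var b = DenseVector.OfArray(new double[] { 2, 1, 2 });
+ var x = DenseVector.Create(3, i => 0.0);
+
+ var monitor = new Iterator<double>(
+     new IterationCountStopCriterion<double>(500),
+     new ResidualStopCriterion<double>(1e-8));
+
+ new TFQMR().Solve(A, b, x, monitor, new DiagonalPreconditioner());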
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
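+ A small sketch of the sparse construction and triangle helpers described above, assuming the 3.x SparseMatrix factory names; the values are illustrative:
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Double;
+
+ // CSR-backed storage: only the non-zero entries are kept.
+ var S = SparseMatrix.OfArray(new double[,]
+ {
+     { 2, 0, 1 },
+     { 0, 3, 0 },
+     { 5, 0, 4 }
+ });
+
+ int stored = S.NonZerosCount;                            // 5 stored entries
+ Matrix<double> upper = S.UpperTriangle();                // keeps the diagonal
+ Matrix<double> strictLower = S.StrictlyLowerTriangle();  // drops the diagonal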
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + double version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
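+ A sketch of sparse vector construction from indexed values, assuming the 3.x factory names (OfIndexedEnumerable and OfEnumerable); the indices and values are illustrative:
+ using System;
+ using MathNet.Numerics.LinearAlgebra.Double;
+
+ // Only the three non-zero entries below are actually stored.
+ var v = SparseVector.OfIndexedEnumerable(1000, new[]
+ {
+     Tuple.Create(3, 1.5),
+     Tuple.Create(250, -2.0),
+     Tuple.Create(999, 4.0)
+ });
+
+ var w = SparseVector.OfEnumerable(new double[1000]);   // all zeros, nothing stored
+ double dot = v.DotProduct(w);                          // 0.0
+ int stored = v.NonZerosCount;                          // 3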
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. 
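+ A brief sketch of the dense factories and a few of the operations listed above, assuming the 3.x DenseMatrix API; the values are illustrative:
+ using MathNet.Numerics.LinearAlgebra;
+ using MathNet.Numerics.LinearAlgebra.Double;
+
+ // Column-major dense storage.
+ var M = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });
+ var Id = DenseMatrix.Create(2, 2, (i, j) => i == j ? 1.0 : 0.0);   // identity via init function
+
+ Matrix<double> product = M * Id;          // equals M
+ double frobenius = M.FrobeniusNorm();     // sqrt(1 + 4 + 9 + 16)
+ double trace = M.Trace();                 // 5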
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. 
+ All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
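Editor's note: a corresponding sketch for the dense-vector constructors and the dot product described above; illustrative only, with the builder names assumed from the MathNet.Numerics 3.x API.

    using MathNet.Numerics.LinearAlgebra;

    static class DenseVectorCreationSketch
    {
        static void Demo()
        {
            var zeros    = Vector<float>.Build.Dense(5);                  // all cells zero
            var fromArr  = Vector<float>.Build.DenseOfArray(new[] { 1f, 2f, 3f });
            var fromInit = Vector<float>.Build.Dense(3, i => 0.5f * i);   // init function

            float dot      = fromArr.DotProduct(fromInit);  // sum of a[i] * b[i]
            float viaTimes = fromArr * fromInit;            // the * operator is the dot product
        }
    }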
+ + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a float dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. 
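Editor's note: the vector norms, index queries and string parsing described above can be exercised as below. The Parse overload taking a format provider is assumed from the documented signature; the values are arbitrary.

    using System.Globalization;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Single;

    static class VectorNormsAndParseSketch
    {
        static void Demo()
        {
            var v = Vector<float>.Build.DenseOfArray(new[] { -3f, 1f, 2f });

            double l1   = v.L1Norm();               // 6, the Manhattan norm
            double l2   = v.L2Norm();               // sqrt(14), the Euclidean norm
            double linf = v.InfinityNorm();         // 3, the maximum absolute value
            int maxAbs  = v.AbsoluteMaximumIndex(); // 0, index of the -3 entry
            float sum   = v.Sum();                  // 0

            // One of the documented string formats: '(n,n,..)'.
            var parsed = DenseVector.Parse("(1, 2, 3)", CultureInfo.InvariantCulture);
        }
    }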
+ + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. 
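Editor's note: a short sketch of the diagonal-matrix constructors just described, including the raw-array binding constructor and the documented rule that only diagonal entries may be set. The constructor overloads follow the descriptions above; everything else is illustrative.

    using MathNet.Numerics.LinearAlgebra.Single;

    static class DiagonalMatrixCreationSketch
    {
        static void Demo()
        {
            // Square diagonal matrix, all cells initialized to zero.
            var zero = new DiagonalMatrix(3);

            // Bind directly to a raw array of diagonal elements: no copy is made,
            // so changes to the array and the matrix affect each other.
            var storage = new[] { 1f, 2f, 3f };
            var d = new DiagonalMatrix(3, 3, storage);

            d[1, 1] = 5f;     // fine: a diagonal entry
            d[0, 1] = 0f;     // fine: the documented exception, no change results
            // d[0, 1] = 1f;  // would throw: non-diagonal entries cannot be set
        }
    }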
+ + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. 
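Editor's note: continuing the sketch, the determinant, Diagonal/SetDiagonal and inverse members documented above behave as follows (values arbitrary):

    using MathNet.Numerics.LinearAlgebra.Single;

    static class DiagonalMatrixOperationsSketch
    {
        static void Demo()
        {
            var d = new DiagonalMatrix(3, 3, new[] { 2f, 4f, 8f });

            float det   = d.Determinant();   // product of the diagonal: 64
            var diag    = d.Diagonal();      // the diagonal returned as a vector
            var inverse = d.Inverse();       // diagonal of reciprocals

            d.SetDiagonal(new[] { 1f, 1f, 1f });  // copy new values onto the diagonal
        }
    }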
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. 
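Editor's note: the Cholesky factorization introduced above is normally obtained through the matrix's Cholesky() method and then used to solve linear systems. A minimal sketch, assuming a small symmetric positive definite system:

    using MathNet.Numerics.LinearAlgebra;

    static class CholeskySketch
    {
        static void Demo()
        {
            var a = Matrix<float>.Build.DenseOfArray(new float[,]
            {
                { 4, 1 },
                { 1, 3 }
            });
            var b = Vector<float>.Build.DenseOfArray(new[] { 1f, 2f });

            var cholesky = a.Cholesky();       // throws if A is not symmetric positive definite
            var x        = cholesky.Solve(b);  // solves A x = b using A = L * L'
            float det    = cholesky.Determinant;
        }
    }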
+ + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. 
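Editor's note: the eigenvalue decomposition whose internals (tred2, tql2, orthes) are documented above is normally consumed through Evd(); a hedged sketch with a small symmetric matrix follows.

    using MathNet.Numerics.LinearAlgebra;

    static class EvdSketch
    {
        static void Demo()
        {
            var a = Matrix<float>.Build.DenseOfArray(new float[,]
            {
                { 2, 1 },
                { 1, 2 }
            });

            var evd = a.Evd();
            var eigenValues  = evd.EigenValues;   // complex eigenvalues (1 and 3 here)
            var eigenVectors = evd.EigenVectors;  // columns are the eigenvectors
            // Since A is symmetric: A = V * D * V'.
        }
    }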
+ + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. 
+ If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. 
+ + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
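Editor's note: the LU, QR, Gram-Schmidt and SVD factorizations documented in this stretch share the same Solve pattern. The sketch below is illustrative (values arbitrary) and assumes the factorization methods exposed on Matrix<T> in MathNet.Numerics 3.x.

    using MathNet.Numerics.LinearAlgebra;

    static class DirectFactorizationSketch
    {
        static void Demo()
        {
            var a = Matrix<float>.Build.DenseOfArray(new float[,]
            {
                { 3, 2 },
                { 1, 4 }
            });
            var b = Vector<float>.Build.DenseOfArray(new[] { 5f, 6f });

            var xLu  = a.LU().Solve(b);            // LU with pivoting
            var xQr  = a.QR().Solve(b);            // Householder QR
            var xGs  = a.GramSchmidt().Solve(b);   // modified Gram-Schmidt QR
            var xSvd = a.Svd(true).Solve(b);       // SVD, computing U and VT

            var inverse = a.LU().Inverse();        // inverse via the LU factors
        }
    }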
+ + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. 
+ The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. 
+ The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the
+ proper preconditioner.
+ The Bi-CGSTAB algorithm was taken from:
+ "Templates for the solution of linear systems: Building blocks for iterative methods",
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, June M. Donato,
+ Jack Dongarra, Victor Eijkhout, Roldan Pozo, Charles Romine and Henk van der Vorst.
+ URL: http://www.netlib.org/templates/Templates.html
+ The algorithm is described in Chapter 2, section 2.3.8, page 27.
+ The example code below provides an indication of the possible use of the solver.
+
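Editor's note: the original remarks refer to example code that is not reproduced in this file. The sketch below restores the idea under stated assumptions: the BiCgStab, Iterator, stop-criterion and DiagonalPreconditioner class names are assumed from the MathNet.Numerics 3.x solver namespaces, the Solve(matrix, input, result, iterator, preconditioner) call follows the member documented just below, and the system itself is arbitrary.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Single.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class BiCgStabSketch
    {
        static void Demo()
        {
            var a = Matrix<float>.Build.DenseOfArray(new float[,]
            {
                { 4, 1, 0 },
                { 1, 4, 1 },
                { 0, 1, 4 }
            });
            var b = Vector<float>.Build.DenseOfArray(new[] { 1f, 2f, 3f });
            var x = Vector<float>.Build.Dense(3);   // result vector, filled by the solver

            // Stop after 1000 iterations or once the residual is small enough.
            var iterator = new Iterator<float>(
                new IterationCountStopCriterion<float>(1000),
                new ResidualStopCriterion<float>(1e-5));

            new BiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        }
    }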
+
+ Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax
+ Instance of the matrix A.
+ Residual values in the supplied vector.
+ Instance of the vector x.
+ Instance of the vector b.
+
+ Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the
+ solution vector and x is the unknown vector.
+ The coefficient matrix, A.
+ The solution vector, b.
+ The result vector, x.
+ The iterator to use to control when to stop iterating.
+ The preconditioner to use for approximations.
+
+ A composite matrix solver. The actual solver is made by a sequence of
+ matrix solvers.
+
+ Solver based on:
+ "Faster PDE-based simulations using robust composite linear solvers",
+ S. Bhowmick, P. Raghavan, L. McInnes and B. Norris,
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387.
+
+ Note that if an iterator is passed to this solver it will be used for all the sub-solvers.
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the
+ proper preconditioner.
+ The GPBiCG algorithm was taken from:
+ "GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with efficiency and robustness",
+ S. Fujino,
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117.
+ The example code below provides an indication of the possible use of the solver.
+
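Editor's note: as with BiCgStab, the example code referenced above is not present in this file. A comparable hedged sketch, assuming the GpBiCg class in the single-precision solver namespace and the generic UnitPreconditioner:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Single.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class GpBiCgSketch
    {
        static void Demo()
        {
            var a = Matrix<float>.Build.DenseOfArray(new float[,]
            {
                { 5, 2, 0 },
                { 2, 5, 1 },
                { 0, 1, 5 }
            });
            var b = Vector<float>.Build.DenseOfArray(new[] { 1f, 0f, 1f });
            var x = Vector<float>.Build.Dense(3);

            var iterator = new Iterator<float>(
                new IterationCountStopCriterion<float>(1000),
                new ResidualStopCriterion<float>(1e-5));

            // Internally the solver alternates between BiCGStab-like and GPBiCG-like steps.
            new GpBiCg().Solve(a, b, x, iterator, new UnitPreconditioner<float>());
        }
    }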
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ "Iterative methods for sparse linear systems",
+ Yousef Saad.
+ The algorithm is described in Chapter 10, section 10.3.2, page 275.
+
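Editor's note: a preconditioner of this kind is handed to an iterative solver, which initializes it with the coefficient matrix and then uses it to approximate solutions of A z = r. The class name ILU0Preconditioner is an assumption based on the MathNet.Numerics 3.x solver namespace; the rest mirrors the BiCgStab sketch above.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Single.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    static class Ilu0Sketch
    {
        static void Demo()
        {
            var a = Matrix<float>.Build.DenseOfArray(new float[,]
            {
                {  4, -1,  0 },
                { -1,  4, -1 },
                {  0, -1,  4 }
            });
            var b = Vector<float>.Build.DenseOfArray(new[] { 1f, 1f, 1f });
            var x = Vector<float>.Build.Dense(3);

            var iterator = new Iterator<float>(
                new IterationCountStopCriterion<float>(500),
                new ResidualStopCriterion<float>(1e-5));

            // The solver initializes the preconditioner with the coefficient matrix before iterating.
            new BiCgStab().Solve(a, b, x, iterator, new ILU0Preconditioner());
        }
    }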
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ "ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner",
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA.
+ Published in: Lecture Notes in Computer Science, Volume 3046 / 2004, pp. 20-28.
+ The algorithm is described in Section 2, page 22.
+
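A similar hedged sketch for the drop-tolerance/partial-pivoting preconditioner described above. The class name ILUTPPreconditioner is an assumption; the three constructor arguments follow the fill level, drop tolerance and pivot tolerance parameters documented below.

```csharp
// Sketch, assuming the MathNet.Numerics 3.x class name ILUTPPreconditioner.
// Constructor arguments: fill level, drop tolerance, pivot tolerance (see below).
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class IlutpExample
{
    public static Vector<double> Solve(SparseMatrix a, Vector<double> b)
    {
        Vector<double> x = DenseVector.Create(b.Count, 0.0);

        // Keep up to 10x the original number of non-zeros, drop entries below 1e-4,
        // and enable partial pivoting (a pivot tolerance of 0.0 would disable it).
        var ilutp = new ILUTPPreconditioner(10.0, 1e-4, 1.0);

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-8),
            new IterationCountStopCriterion<double>(1000));

        new GpBiCg().Solve(a, b, x, iterator, ilutp);
        return x;
    }
}
```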
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ "ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors",
+ Man-Chung Yeung and Tony F. Chan,
+ SIAM Journal on Scientific Computing,
+ Volume 21, Number 4, pp. 1263-1290.
+ The example code below provides an indication of the possible use of the solver.
+
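The example code referred to above is likewise missing from this listing. A minimal stand-in follows; the class name MlkBiCgStab and the NumberOfStartingVectors property name are assumptions based on MathNet.Numerics 3.x, with the value constraint taken from the property documentation below.

```csharp
// Sketch with assumed MathNet.Numerics 3.x names (MlkBiCgStab, NumberOfStartingVectors).
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class MlkBiCgStabExample
{
    public static Vector<double> Solve(SparseMatrix a, Vector<double> b)
    {
        Vector<double> x = DenseVector.Create(b.Count, 0.0);

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-8),
            new IterationCountStopCriterion<double>(1000));

        // Use a small number of Lanczos starting vectors; the value must be larger
        // than 1 and smaller than the number of variables, per the docs below.
        var solver = new MlkBiCgStab { NumberOfStartingVectors = 4 };
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}
```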
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ "Iterative Methods for Sparse Linear Systems",
+ Yousef Saad.
+ The algorithm is described in Chapter 7, section 7.4.3, page 219.
+ The example code below provides an indication of the possible use of the solver.
+
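A final stand-in for the missing TFQMR example, under the same MathNet.Numerics 3.x naming assumptions (TFQMR, plus the DiagonalPreconditioner documented earlier in this file).

```csharp
// Sketch with the assumed MathNet.Numerics 3.x class name TFQMR.
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class TfqmrExample
{
    public static Vector<double> Solve(SparseMatrix a, Vector<double> b)
    {
        Vector<double> x = DenseVector.Create(b.Count, 0.0);

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-8),
            new IterationCountStopCriterion<double>(1000));

        // TFQMR is transpose-free: only products with A itself are required.
        new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}
```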
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. 
+ The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. 
+ + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
+ + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. 
The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. 
+ + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the Frobenius norm of this matrix. + The Frobenius norm of this matrix. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. 
+ + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. 
+ If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. 
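The Cholesky, LU and QR factorizations documented in this stretch are normally reached through the extension methods on `Matrix<T>` rather than by constructing the factorization classes directly. A hedged sketch of solving Ax = b each way, assuming MathNet.Numerics 3.x:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class FactorizationSolve
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4, 1, 0 },
            { 1, 3, 1 },
            { 0, 1, 2 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

        // LU works for any square matrix; Cholesky requires a symmetric positive definite input.
        var xLu = a.LU().Solve(b);
        var xChol = a.Cholesky().Solve(b);

        // QR also covers least-squares problems when the matrix is tall (rows >= columns).
        var xQr = a.QR().Solve(b);

        Console.WriteLine(xLu);
        Console.WriteLine((xChol - xQr).L2Norm()); // ~0 for this well-conditioned system
    }
}
```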
+ + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. 
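For the SVD and EVD members described above, the usual entry points are `Svd()` and `Evd()` on the matrix itself. The sketch below inspects singular values, rank, condition number and eigenvalues; property names are those of MathNet.Numerics 3.x and should be checked against the packaged version:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class SvdEvdInspection
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2, 0 },
            { 0, 1 },
            { 0, 0 }
        });

        var svd = a.Svd();                      // computes U, S (singular values) and VT
        Console.WriteLine(svd.S);               // singular values 2 and 1
        Console.WriteLine(svd.Rank);            // 2
        Console.WriteLine(svd.ConditionNumber); // max(S) / min(S) = 2

        var sym = Matrix<double>.Build.DenseOfArray(new double[,] { { 2, 1 }, { 1, 2 } });
        var evd = sym.Evd();                    // symmetric => real eigenvalues, orthogonal eigenvectors
        Console.WriteLine(evd.EigenValues);     // 1 and 3 (returned as complex numbers with zero imaginary part)
    }
}
```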
+ + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. 
The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. 
+ + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex value z1 + Complex value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. 
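The complex-valued matrix members listed here mirror the real-valued API. A small sketch of the conjugate transpose and the Hermitian check, using `System.Numerics.Complex`, which MathNet.Numerics 3.x builds on:

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

class ComplexMatrixBasics
{
    static void Main()
    {
        var m = Matrix<Complex>.Build.DenseOfArray(new Complex[,]
        {
            { new Complex(2, 0),  new Complex(0, 1) },
            { new Complex(0, -1), new Complex(3, 0) }
        });

        // Conjugate (Hermitian) transpose of the matrix.
        var h = m.ConjugateTranspose();

        // A matrix equal to its own conjugate transpose is Hermitian.
        Console.WriteLine(m.IsHermitian()); // True for this example
        Console.WriteLine(h.Equals(m));     // True as well
    }
}
```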
+ + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
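The `<example>` block this remark refers to did not survive in the listing above, so here is a hedged reconstruction of typical BiCgStab usage. The class and stop-criterion names (`BiCgStab`, `DiagonalPreconditioner`, `Iterator<double>`, `IterationCountStopCriterion<double>`, `ResidualStopCriterion<double>`) are those of MathNet.Numerics 3.x and should be verified against the packaged version:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class BiCgStabExample
{
    static void Main()
    {
        // Small non-symmetric test system Ax = b.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4, 1, 0 },
            { 2, 5, 1 },
            { 0, 1, 3 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
        var x = Vector<double>.Build.Dense(3);

        // Stop after 1000 iterations or once the residual drops below 1e-10.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new BiCgStab();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        System.Console.WriteLine(x);
        System.Console.WriteLine((a * x - b).L2Norm()); // should be ~0
    }
}
```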
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris<br/>
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
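GpBiCg exposes the same `Solve(matrix, input, result, iterator, preconditioner)` contract as the other Krylov solvers, so the example referenced just above can be approximated by a helper that accepts any `IIterativeSolver<double>`. A sketch under the same MathNet.Numerics 3.x naming assumptions:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class KrylovHelper
{
    // Works for GpBiCg, BiCgStab, TFQMR and MlkBiCgStab alike, since they share IIterativeSolver<double>.
    public static Vector<double> Solve(IIterativeSolver<double> solver, Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}

// Usage: var x = KrylovHelper.Solve(new GpBiCg(), a, b);
```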
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
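To pair the ILU(0) preconditioner with one of the solvers above, only the preconditioner argument changes. The class name `ILU0Preconditioner` is an assumption based on the MathNet.Numerics 3.x double-precision solvers namespace; verify it against the packaged assembly:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class Ilu0Example
{
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        // ILU0Preconditioner (assumed name) is initialized from `a` by the solver before iterating;
        // per the docs above, it stores the combined L and U factors in a single matrix to save space.
        new BiCgStab().Solve(a, b, x, iterator, new ILU0Preconditioner());
        return x;
    }
}
```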
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +<br/>
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
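For ML(k)-BiCGStab the referenced example can be sketched the same way. The default number of Lanczos starting vectors is used below; per the remarks that follow, any custom value must be larger than 1 and smaller than the number of variables. Same MathNet.Numerics 3.x naming assumptions as above:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

static class MlkBiCgStabExample
{
    public static Vector<double> Solve(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-8));

        // Default starting-vector count; a preconditioner is still advisable
        // for anything beyond small, well-conditioned systems.
        new MlkBiCgStab().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}
```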
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
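TFQMR shares the same `Solve` contract; since its example code was also stripped from this listing, here is a small end-to-end sketch under the same naming assumptions:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class TfqmrExample
{
    static void Main()
    {
        // Small non-symmetric system; TFQMR avoids the transpose products that plain QMR needs.
        var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 3, 1 }, { 0, 2 } });
        var b = Vector<double>.Build.Dense(new[] { 5.0, 4.0 });
        var x = Vector<double>.Build.Dense(2);

        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(100),
            new ResidualStopCriterion<double>(1e-10));

        new TFQMR().Solve(a, b, x, iterator, new DiagonalPreconditioner());
        System.Console.WriteLine(x); // approximately [1, 2]
    }
}
```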
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
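A brief sketch of the sparse-matrix factory, triangle and norm members documented above, assuming the MathNet.Numerics 3.x identifiers; the values are purely illustrative.

using System;
using MathNet.Numerics.LinearAlgebra.Double;

var S = SparseMatrix.OfIndexed(4, 4, new[]
{
    Tuple.Create(0, 0, 2.0),
    Tuple.Create(1, 3, -1.0),
    Tuple.Create(3, 1, 5.0)
});

Console.WriteLine(S.NonZerosCount);        // 3 stored values in the CSR storage
var lower = S.LowerTriangle();             // new matrix, diagonal included
var strictUpper = S.StrictlyUpperTriangle(); // diagonal excluded
Console.WriteLine(S.InfinityNorm());       // maximum absolute row sum
Console.WriteLine(S.FrobeniusNorm());      // sqrt of the sum of squared entries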
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
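A short sketch of the sparse-vector members documented above, including the caveat about adding a non-zero scalar; the factory name OfIndexedEnumerable and the data are assumptions based on MathNet.Numerics 3.x.

using System;
using MathNet.Numerics.LinearAlgebra.Double;

var v = SparseVector.OfIndexedEnumerable(1000, new[]
{
    Tuple.Create(10, 1.5),
    Tuple.Create(500, -2.0)
});
Console.WriteLine(v.NonZerosCount);   // 2

var w = SparseVector.OfIndexedEnumerable(1000, new[] { Tuple.Create(10, 4.0) });
Console.WriteLine(v.DotProduct(w));   // 1.5 * 4.0 = 6.0
Console.WriteLine(v.L1Norm());        // 1.5 + 2.0 = 3.5

// As the remarks warn, adding a non-zero scalar fills every entry, so the
// "sparse" result stores all 1000 values; a dense vector suits that pattern better.
var filled = v.Add(1.0);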
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. 
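A small sketch of the complex-vector members documented above (conjugate dot product, norms, pointwise maps, normalization); the member names are assumed from MathNet.Numerics 3.x and the values are illustrative.

using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

var u = Vector<Complex>.Build.Dense(new[] { new Complex(1, 1), new Complex(0, -2) });
var v = Vector<Complex>.Build.Dense(new[] { new Complex(2, 0), new Complex(1, 1) });

Complex plain = u.DotProduct(v);           // sum of a[i]*b[i]
Complex herm  = u.ConjugateDotProduct(v);  // sum of conj(a[i])*b[i]

Console.WriteLine(u.L1Norm());             // sum of the moduli
Console.WriteLine(u.L2Norm());             // Euclidean norm
Console.WriteLine(u.InfinityNorm());       // largest modulus

var exp  = u.PointwiseExp();               // element-wise exponential
var unit = u.Normalize(2);                 // scaled to unit 2-norm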
+ + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. 
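A sketch of the dense-matrix creation paths documented above, contrasting copying factories with direct array binding; the factory names (OfArray, CreateIdentity, CreateRandom) are assumed from MathNet.Numerics 3.x.

using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra.Double;

// Copies the 2D array into new column-major storage.
var A = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });

// Binds directly to the raw column-major array: edits to 'data' show up in B.
var data = new[] { 1.0, 3.0, 2.0, 4.0 };
var B = new DenseMatrix(2, 2, data);

var I = DenseMatrix.CreateIdentity(3);
var R = DenseMatrix.CreateRandom(2, 2, new ContinuousUniform(0.0, 1.0));

System.Console.WriteLine(A.FrobeniusNorm());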
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. 
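A follow-on sketch of the arithmetic members listed above (products, transposed products, pointwise products and the trace), using member names assumed from MathNet.Numerics 3.x.

using MathNet.Numerics.LinearAlgebra.Double;

var A = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });
var B = DenseMatrix.OfArray(new double[,] { { 0, 1 }, { 1, 0 } });
var v = new DenseVector(new[] { 1.0, -1.0 });

var Av  = A * v;                           // matrix * vector
var AB  = A * B;                           // matrix * matrix
var ABt = A.TransposeAndMultiply(B);       // A * B^T
var AtB = A.TransposeThisAndMultiply(B);   // A^T * B
var had = A.PointwiseMultiply(B);          // element-wise (Hadamard) product

System.Console.WriteLine(A.Trace());       // 1 + 4 = 5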
+ + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex32 value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex32 value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex32 value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. 
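A sketch of the dense-vector creation paths documented above, copying versus binding to a raw array, assuming MathNet.Numerics 3.x naming and illustrative data.

using System;
using MathNet.Numerics.LinearAlgebra.Double;

var raw = new[] { 3.0, 1.0, -2.0 };

var bound = new DenseVector(raw);          // binds: writing raw[0] also changes bound[0]
var copy  = DenseVector.OfEnumerable(raw); // independent copy

Console.WriteLine(bound.DotProduct(copy));        // 3*3 + 1*1 + (-2)*(-2) = 14
Console.WriteLine(bound.AbsoluteMaximumIndex());  // 0, since |3| is the largest entry
Console.WriteLine(bound.Sum());                   // 2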
+ + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex32 dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex32 dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. 
+ Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
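A sketch of the diagonal-matrix behaviour described above: only diagonal entries may be set, and determinant and inverse come cheaply from the diagonal. The factory name OfDiagonal and the data are assumptions based on MathNet.Numerics 3.x.

using System;
using MathNet.Numerics.LinearAlgebra.Double;

var D = DiagonalMatrix.OfDiagonal(3, 3, new[] { 2.0, 4.0, 8.0 });

Console.WriteLine(D.Determinant());   // 2 * 4 * 8 = 64
var Dinv = D.Inverse();               // diagonal of 0.5, 0.25, 0.125
Console.WriteLine(D.L1Norm());        // 8, the maximum absolute column sum

// Writing a non-zero value off the diagonal throws, as the remarks above state.
// D[0, 1] = 1.0;   // rejected: off-diagonal entries cannot be stored
D[0, 1] = 0.0;      // allowed: storing zero leaves the matrix unchanged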
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
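The factorization classes documented above all expose a Solve method once constructed. A minimal sketch, assuming the MathNet.Numerics 3.x member names (Cholesky(), Evd(), EigenValues) and an illustrative symmetric positive definite matrix:

using System;
using MathNet.Numerics.LinearAlgebra.Double;

var A = DenseMatrix.OfArray(new double[,]
{
    { 4, 1, 0 },
    { 1, 3, 1 },
    { 0, 1, 2 }
});
var b = new DenseVector(new[] { 1.0, 2.0, 3.0 });

// Cholesky: A = L*L'; the factorization is computed once and reused for solves.
var chol = A.Cholesky();
var x = chol.Solve(b);                  // solves A*x = b
Console.WriteLine(chol.Determinant);    // determinant of A, read off the factor

// Eigenvalue decomposition: A = V*D*V^-1, with V orthogonal since A is symmetric.
var evd = A.Evd();
var values  = evd.EigenValues;          // eigenvalues, reported as complex numbers
var vectors = evd.EigenVectors;         // columns are the eigenvectors (matrix V)
var check = (A * vectors - vectors * evd.D).FrobeniusNorm();
Console.WriteLine(check < 1e-10);       // A*V equals V*D up to rounding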
+ + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. 
+ + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
+ + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
+ + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. 
+ If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
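As a small, hedged example of the Complex32 matrix members listed here (conjugate transpose and the hermitian check), assuming the Math.NET Numerics Complex32 type and the Matrix<Complex32>.Build factory:

using System;
using MathNet.Numerics;
using MathNet.Numerics.LinearAlgebra;

class Complex32MatrixExample
{
    static void Main()
    {
        // A 2x2 hermitian matrix: it equals its own conjugate transpose.
        var m = Matrix<Complex32>.Build.DenseOfArray(new[,]
        {
            { new Complex32(2, 0), new Complex32(1, -1) },
            { new Complex32(1, 1), new Complex32(3, 0) }
        });

        var h = m.ConjugateTranspose();              // conjugate (hermitian) transpose
        Console.WriteLine(m.IsHermitian());          // True for this matrix
        Console.WriteLine((m - h).FrobeniusNorm());  // 0 when m equals its conjugate transpose
    }
}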
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
 + Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
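The example code referred to above is not included in this documentation text. A minimal sketch of the documented Solve(matrix, input, result, iterator, preconditioner) call, assuming the Math.NET Numerics types BiCgStab, Iterator<double>, IterationCountStopCriterion<double>, ResidualStopCriterion<double> and DiagonalPreconditioner, might look like this:

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class BiCgStabExample
{
    static void Main()
    {
        // Diagonally dominant tridiagonal test system A x = b.
        var a = Matrix<double>.Build.Dense(100, 100,
            (i, j) => i == j ? 4.0 : Math.Abs(i - j) == 1 ? -1.0 : 0.0);
        var b = Vector<double>.Build.Dense(100, 1.0);
        var x = Vector<double>.Build.Dense(100);

        // Stop after 1000 iterations or once the residual norm falls below 1e-10.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new BiCgStab();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        Console.WriteLine((b - a * x).L2Norm());   // true residual, as defined above
    }
}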
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
 + S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
 + Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
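The Initialize/Approximate contract documented for this preconditioner can be sketched as follows. The class name ILU0Preconditioner is an assumption (it is not spelled out in this file); the rest follows the member descriptions above.

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class Ilu0Example
{
    static void Main()
    {
        // Sparse, diagonally dominant test matrix.
        var a = SparseMatrix.OfArray(new double[,]
        {
            {  4.0, -1.0,  0.0 },
            { -1.0,  4.0, -1.0 },
            {  0.0, -1.0,  4.0 }
        });

        var precond = new ILU0Preconditioner();
        precond.Initialize(a);                 // builds the combined L/U storage from A

        var rhs = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
        var lhs = Vector<double>.Build.Dense(3);
        precond.Approximate(rhs, lhs);         // applies the incomplete factors to approximate A x = b

        Console.WriteLine(lhs);
    }
}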
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
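A hedged sketch of constructing this drop-tolerance preconditioner with the three settings its constructor documentation lists (fill level, drop tolerance, pivot tolerance). The class name ILUTPPreconditioner and the positional parameter order are assumptions based on that constructor description.

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class IlutpExample
{
    static void Main()
    {
        var a = SparseMatrix.OfArray(new double[,]
        {
            {  4.0, -1.0,  0.0,  0.0 },
            { -1.0,  4.0, -1.0,  0.0 },
            {  0.0, -1.0,  4.0, -1.0 },
            {  0.0,  0.0, -1.0,  4.0 }
        });

        // Assumed parameter order: fill level, drop tolerance, pivot tolerance.
        // A pivot tolerance of 0.0 disables pivoting, as described in the property documentation.
        var precond = new ILUTPPreconditioner(10.0, 1e-4, 0.0);
        precond.Initialize(a);

        var rhs = Vector<double>.Build.Dense(4, 1.0);
        var approx = Vector<double>.Build.Dense(4);
        precond.Approximate(rhs, approx);
        Console.WriteLine(approx);
    }
}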
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
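A brief sketch of the CSR-backed sparse matrix described in this section, assuming the SparseMatrix.OfArray factory, the NonZerosCount property documented above and the Matrix<double>.Build.SparseIdentity builder:

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class SparseMatrixExample
{
    static void Main()
    {
        // Mostly-zero matrix held in 3-array compressed-sparse-row (CSR) storage.
        var a = SparseMatrix.OfArray(new double[,]
        {
            { 2.0, 0.0, 0.0, 0.0 },
            { 0.0, 0.0, 3.0, 0.0 },
            { 0.0, 0.0, 0.0, 0.0 },
            { 0.0, 1.0, 0.0, 4.0 }
        });

        Console.WriteLine(a.NonZerosCount);   // 4 stored entries rather than 16

        // Sparse identity and the sparse-sparse product keep the sparse representation.
        var identity = Matrix<double>.Build.SparseIdentity(4);
        Console.WriteLine((a * identity - a).FrobeniusNorm());   // 0
    }
}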
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
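Correspondingly, a minimal sparse-vector sketch; the SparseVector.OfIndexedEnumerable factory name is an assumption, while the norms and the scalar-add warning follow the documentation above.

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class SparseVectorExample
{
    static void Main()
    {
        // Length-1000 vector with only three stored (non-zero) entries.
        var v = SparseVector.OfIndexedEnumerable(1000, new[]
        {
            Tuple.Create(10, 1.5),
            Tuple.Create(500, -2.0),
            Tuple.Create(999, 0.5)
        });

        Console.WriteLine(v.NonZerosCount);                      // 3
        Console.WriteLine($"{v.L1Norm()} {v.InfinityNorm()}");   // 4 and 2

        // Adding a non-zero scalar fills every entry, which the documentation above
        // warns is very inefficient for sparse storage.
        var shifted = v.Add(1.0);
        Console.WriteLine(shifted.Sum());
    }
}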
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. 
+ + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
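This stretch of the builder documentation is about the many "dense matrix as a copy of ..." members. A short sketch of three of them, under the same MathNet.Numerics assumption (method names per the 3.x builder API, everything else illustrative):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class DenseBuilderExample
{
    static void Main()
    {
        // Copy of a 2D array: the matrix owns its own storage afterwards.
        var a = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 1.0, 2.0 },
            { 3.0, 4.0 }
        });

        // The same matrix built row by row from arrays.
        var b = Matrix<double>.Build.DenseOfRowArrays(
            new[] { 1.0, 2.0 },
            new[] { 3.0, 4.0 });

        // Initialise every cell from a (row, column) function instead.
        var c = Matrix<double>.Build.Dense(2, 2, (i, j) => i == j ? 1.0 : 0.0);

        Console.WriteLine(a.Equals(b)); // True: both are independent copies of the same values
        Console.WriteLine(c);           // 2x2 identity
    }
}
```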
+ + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. 
+ A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. 
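For the sparse builder members above, the key point is that only the supplied non-zeros are stored; omitted entries stay zero. A minimal sketch (MathNet.Numerics assumed; names illustrative):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class SparseBuilderExample
{
    static void Main()
    {
        // 1000 x 1000 all-zero sparse matrix: only non-zeros will ever be stored.
        var s = Matrix<double>.Build.Sparse(1000, 1000);
        s[0, 0] = 2.0;
        s[999, 1] = -1.0;

        // The same idea in one call: (row, column, value) triples, anything omitted stays zero.
        var t = Matrix<double>.Build.SparseOfIndexed(1000, 1000, new[]
        {
            Tuple.Create(0, 0, 2.0),
            Tuple.Create(999, 1, -1.0)
        });

        Console.WriteLine(s.Equals(t)); // True: same values, built two different ways
    }
}
```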
+ + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. 
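The diagonal-matrix builders and the randomly initialised dense vectors described here combine naturally. A hedged sketch; `DiagonalOfDiagonalArray`, `DiagonalIdentity` and `Random` are the 3.x builder member names as I understand them, and everything else is illustrative:

```csharp
using System;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

class DiagonalAndRandomExample
{
    static void Main()
    {
        // Diagonal matrix that only stores its diagonal entries.
        var d = Matrix<double>.Build.DiagonalOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });

        // 3x3 identity, also stored as a diagonal matrix.
        var id = Matrix<double>.Build.DiagonalIdentity(3);

        // Dense vector whose entries are drawn from a N(0, 1) distribution.
        var noise = Vector<double>.Build.Random(3, new Normal(0.0, 1.0));

        Console.WriteLine(d * id);    // unchanged: multiplying by the identity
        Console.WriteLine(d * noise); // scales noise[i] by d[i, i]
    }
}
```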
+ + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. 
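The "generic linear algebra type builder" sections above exist so that code can construct vectors and matrices without knowing the scalar type up front. A sketch of what that looks like in practice, assuming the builder's `Zero`/`One` members carry the names used in MathNet.Numerics 3.x; the `BasisVector` helper itself is hypothetical, not part of this changeset:

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

static class GenericBuilderExample
{
    // One implementation for every supported scalar type: the generic builder
    // hands us an all-zero vector of type T and the constant "1.0 for type T".
    static Vector<T> BasisVector<T>(int length, int index)
        where T : struct, IEquatable<T>, IFormattable
    {
        var v = Vector<T>.Build.Dense(length); // all zeros of type T
        v[index] = Vector<T>.Build.One;        // the documented "value of 1.0 for type T"
        return v;
    }

    static void Main()
    {
        Console.WriteLine(BasisVector<double>(4, 2));  // (0, 0, 1, 0)
        Console.WriteLine(BasisVector<Complex>(4, 2)); // same code, complex scalars
    }
}
```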
+ + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. 
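The "dense matrix from a 2D array of existing matrices" member documented just above assembles a block matrix from smaller pieces, padding with zeros where blocks do not align. A brief sketch (MathNet.Numerics assumed, names illustrative):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class BlockMatrixExample
{
    static void Main()
    {
        var m = Matrix<double>.Build;
        var a = m.DenseIdentity(2);  // 2x2 identity
        var b = m.Dense(2, 2, 5.0);  // 2x2 filled with 5

        // Assemble a 4x4 block matrix [A B; B A] from the 2x2 pieces.
        var block = m.DenseOfMatrixArray(new[,]
        {
            { a, b },
            { b, a }
        });

        Console.WriteLine(block);
    }
}
```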
+ + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. 
+ This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
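The distinction between the "directly binding" builders and the copying "...Of..." builders matters in practice: one shares memory with the caller, the other does not. A hedged sketch of the difference, with the behaviour in the comments taken from the remarks above (method names per the 3.x API; variable names illustrative):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class StorageBindingExample
{
    static void Main()
    {
        // Column-major storage for a 2x2 matrix: (1 3; 2 4).
        var storage = new[] { 1.0, 2.0, 3.0, 4.0 };

        // Binds directly to the array: no copy is made.
        var bound = Matrix<double>.Build.Dense(2, 2, storage);

        // Copies the same data: the matrix gets its own memory block.
        var copied = Matrix<double>.Build.DenseOfColumnMajor(2, 2, storage);

        storage[0] = 99.0;
        Console.WriteLine(bound[0, 0]);  // 99: the change to the array shows through
        Console.WriteLine(copied[0, 0]); // 1:  the independent copy is unaffected
    }
}
```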
+ + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. 
+ Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + Supported data types are double, single, , and . + + + + Gets the lower triangular form of the Cholesky matrix. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + Supported data types are double, single, , and . 
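Before the eigenvalue-decomposition members continue, here is a minimal sketch of the Cholesky factorization documented just above: factor a small symmetric positive definite matrix and solve Ax = b through it (MathNet.Numerics assumed; names illustrative).

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class CholeskyExample
{
    static void Main()
    {
        // A symmetric positive definite matrix (required, or the factorization throws).
        var a = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 4.0, 2.0 },
            { 2.0, 3.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 2.0, 1.0 });

        var chol = a.Cholesky();             // A = L * L'
        Console.WriteLine(chol.Factor);      // lower triangular L
        Console.WriteLine(chol.Determinant); // 4*3 - 2*2 = 8

        var x = chol.Solve(b);               // solves A x = b via the factorization
        Console.WriteLine(a * x);            // reproduces b (up to rounding)
    }
}
```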
+ + + + Gets or sets a value indicating whether matrix is symmetric or not + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Gets or sets the eigen values (λ) of matrix in ascending value. + + + + + Gets or sets eigenvectors. + + + + + Gets or sets the block diagonal eigenvalue matrix. + + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + Supported data types are double, single, , and . + + + + Classes that solves a system of linear equations, AX = B. + + Supported data types are double, single, , and . + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, Ax = b + + The right hand side vector, b. + The left hand side Vector, x. + + + + Solves a system of linear equations, Ax = b. + + The right hand side vector, b. + The left hand side Matrix>, x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + Supported data types are double, single, , and . + + + + Gets the lower triangular factor. + + + + + Gets the upper triangular factor. + + + + + Gets the permutation applied to LU factorization. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. 
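The LU members above, including the LU-based `Inverse`, look like this in use; the pivoting mentioned in the remarks is what lets the zero in the top-left corner go through safely (MathNet.Numerics assumed; names illustrative).

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class LuExample
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 0.0, 2.0 },
            { 3.0, 1.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0 });

        var lu = a.LU();                     // P * A = L * U, with pivoting for stability
        Console.WriteLine(lu.L);
        Console.WriteLine(lu.U);
        Console.WriteLine(lu.Determinant);   // 0*1 - 2*3 = -6

        Console.WriteLine(lu.Solve(b));      // solves A x = b
        Console.WriteLine(a.Inverse() * b);  // same answer, via the LU-based inverse
    }
}
```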
+ + + + The type of QR factorization go perform. + + + + + Compute the full QR factorization of a matrix. + + + + + Compute the thin QR factorization of a matrix. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + Supported data types are double, single, , and . + + + + Gets or sets orthogonal Q matrix + + + + + Gets the upper triangular factor R. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + Supported data types are double, single, , and . + + + Indicating whether U and VT matrices have been computed during SVD factorization. + + + + Gets the singular values (Σ) of matrix in ascending value. + + + + + Gets the left singular vectors (U - m-by-m unitary matrix) + + + + + Gets the transpose right singular vectors (transpose of V, an n-by-n unitary matrix) + + + + + Returns the singular values as a diagonal . + + The singular values as a diagonal . + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. 
+ The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + Supported data types are double, single, , and . + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + + + The value of 1.0. + + + + + The value of 0.0. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar denominator to use. + The matrix to store the result of the division. 
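The QR and SVD factorizations documented in the preceding stretch are most often used to solve over-determined systems in the least-squares sense. A hedged sketch of both on the same 3x2 system (MathNet.Numerics assumed; names illustrative):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Factorization;

class QrSvdExample
{
    static void Main()
    {
        // A tall 3x2 matrix: an over-determined system A x = b.
        var a = Matrix<double>.Build.DenseOfArray(new[,]
        {
            { 1.0, 1.0 },
            { 1.0, 2.0 },
            { 1.0, 3.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 2.0 });

        // Thin QR: Q is 3x2, R is 2x2. Solve returns the least-squares solution.
        var qr = a.QR(QRMethod.Thin);
        Console.WriteLine(qr.R);
        Console.WriteLine(qr.Solve(b));

        // SVD: singular values, rank and condition number of the same matrix.
        var svd = a.Svd();
        Console.WriteLine(svd.S);               // singular values
        Console.WriteLine(svd.Rank);            // 2
        Console.WriteLine(svd.ConditionNumber); // max(S) / min(S)
        Console.WriteLine(svd.Solve(b));        // same least-squares solution
    }
}
```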
+ + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar numerator to use. + The matrix to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent matrix and store the result into the result matrix. + + The exponent matrix to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Adds a scalar to each element of the matrix. + + The scalar to add. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds a scalar to each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix. + + The scalar to subtract. 
+ A new matrix containing the subtraction of this matrix and the scalar. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts each element of the matrix from a scalar. + + The scalar to subtract from. + A new matrix containing the subtraction of the scalar and this matrix. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of this matrix with a scalar. + + The scalar to multiply with. + The result of the multiplication. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides each element of this matrix with a scalar. + + The scalar to divide with. + The result of the division. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides a scalar by each element of the matrix. + + The scalar to divide. + The result of the division. + + + + Divides a scalar by each element of the matrix and places results into the result matrix. + + The scalar to divide. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.ColumnCount != rightSide.Count. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.RowCount. + If this.ColumnCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ). + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.Rows. + If the result matrix's dimensions are not the this.Rows x other.Columns. + + + + Multiplies this matrix with another matrix and returns the result. 
+ + The matrix to multiply with. + If this.Columns != other.Rows. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with the conjugate transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the conjugate transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the conjugate transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Raises this square matrix to a positive integer exponent and places the results into the result matrix. + + The positive integer exponent to raise the matrix to. + The result of the power. + + + + Multiplies this square matrix with another matrix and returns the result. + + The positive integer exponent to raise the matrix to. + + + + Negate each element of this matrix. + + A matrix containing the negated values. + + + + Negate each element of this matrix and place the results into the result matrix. 
+ + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. 
+ + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
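A minimal C# sketch of how the arithmetic and pointwise members documented above are typically used; the Matrix<double>/Vector<double> builder calls are standard Math.NET Numerics 3.x API and the concrete values are illustrative only, not taken from this diff.

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] {
        { 1, 2 },
        { 3, 4 }
    });
    var B = Matrix<double>.Build.DenseIdentity(2);
    var v = Vector<double>.Build.DenseOfArray(new double[] { 1, 1 });

    var sum    = A.Add(B);                       // element-wise A + B
    var scaled = A.Multiply(2.0);                // every element scaled by 2
    var Av     = A.Multiply(v);                  // matrix-vector product
    var AtA    = A.TransposeThisAndMultiply(A);  // A' * A without forming A' explicitly
    var had    = A.PointwiseMultiply(B);         // Hadamard (element-wise) product
    var expA   = A.PointwiseExp();               // element-wise exponential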
[Continues the Matrix<T> documentation: pointwise unary functions (abs, acos, asin, atan, atan2, ceiling, cos, cosh, floor, log10, round, sign, sin, sinh, sqrt, tan, tanh); trace; rank and nullity (obtained from SVD); condition number; determinant; orthonormal bases for the null space and column space; inverse and Moore-Penrose pseudo-inverse; Kronecker product; pointwise (absolute) minimum/maximum against a scalar or another matrix; the induced L1/L2/infinity and Frobenius norms (with a note that the sparse L2 norm currently uses a dense SVD); row/column p-norms, normalization and (absolute) row/column sums; the equality, hash-code and ToString-style members; and the constructor, raw Storage property, RowCount/ColumnCount, range-checked indexer and unchecked At accessors.]
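The scalar characteristics documented here are parameterless instance calls; a short sketch using stock Math.NET Numerics 3.x API (the matrix values are illustrative only):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] {
        { 2, 0, 1 },
        { 0, 3, 0 },
        { 1, 0, 2 }
    });

    double det  = A.Determinant();      // 9 for this matrix
    int    rank = A.Rank();             // effective numerical rank, via SVD
    double cond = A.ConditionNumber();  // ratio of largest to smallest singular value
    double l1   = A.L1Norm();           // maximum absolute column sum
    double fro  = A.FrobeniusNorm();    // sqrt of the sum of squared entries
    var    inv  = A.Inverse();          // throws if the matrix is singular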
[Continues: clearing the whole matrix, individual rows/columns, row/column sets, sub-matrices and values below a threshold or matching a predicate; cloning and copying into another matrix; copying rows, columns and sub-ranges into new or existing vectors; upper/lower (and strictly upper/lower) triangles, into new matrices or a result matrix; sub-matrix extraction and the diagonal; inserting, removing and setting columns and rows (from vectors, sub-ranges or arrays); and setting sub-matrices and the diagonal.]
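A sketch of the row/column/sub-matrix accessors and setters described above; the member names follow Math.NET Numerics 3.x and the data is illustrative:

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] {
        { 1, 2, 3 },
        { 4, 5, 6 },
        { 7, 8, 9 }
    });

    Vector<double> row1 = A.Row(1);                // copy of the second row: 4, 5, 6
    Vector<double> col2 = A.Column(2);             // copy of the third column: 3, 6, 9
    Matrix<double> sub  = A.SubMatrix(0, 2, 1, 2); // rows 0-1, columns 1-2
    Vector<double> diag = A.Diagonal();            // 1, 5, 9

    var B = A.Clone();                             // independent copy
    B.SetRow(0, new double[] { 10, 11, 12 });      // overwrite the first row
    B.ClearColumn(2);                              // zero the last column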
[Continues: the diagonal-setting remarks for non-square matrices; transpose and conjugate transpose (as a new matrix or into a result matrix); row and column permutations; Append, Stack and DiagonalStack composition (with dimension checks); symmetry / hermitian queries; and the array conversions (independent 2D array, column-major array, row-major array, row arrays, column arrays) together with their non-copying As* counterparts that expose internal storage when the backing store matches, or return null otherwise. The column-major/row-major docs share this example:
           1, 2, 3
           4, 5, 6   column major: 1, 4, 7, 2, 5, 8, 3, 6, 9;   row major: 1, 2, 3, 4, 5, 6, 7, 8, 9
           7, 8, 9
The span closes with the enumerators over all values, indexed values, non-zero values, columns (whole or a subset, with or without indices) and rows.]
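The transpose, array-conversion and enumeration members summarised above, in a short sketch with standard Math.NET Numerics 3.x calls; the commented outputs follow from the 2x3 example matrix:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] {
        { 1, 2, 3 },
        { 4, 5, 6 }
    });

    var At = A.Transpose();                      // 3x2 transpose (new matrix)
    double[]  colMajor = A.ToColumnMajorArray(); // 1, 4, 2, 5, 3, 6
    double[]  rowMajor = A.ToRowMajorArray();    // 1, 2, 3, 4, 5, 6
    double[,] grid     = A.ToArray();            // independent 2D copy

    foreach (var entry in A.EnumerateIndexed())  // (row, column, value) tuples
        Console.WriteLine($"[{entry.Item1},{entry.Item2}] = {entry.Item3}");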
[Continues: in-place and copying map operations (plain, indexed and type-converting, each with an option to force mapping of zero entries on sparse storage); per-row and per-column folds returning accumulator arrays; row/column folds and reductions returning vectors; two-matrix map and fold; Find / Exists / ForAll predicates and their two-matrix variants; and the operator overloads: unary plus and negation, matrix and scalar addition and subtraction, matrix*matrix, matrix*vector, vector*matrix, scalar multiplication and division in both orders, and the pointwise % remainder operators, each noting that a new result is allocated using the representation of the denser operand.]
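A hedged sketch of the map/fold members and operator overloads; the member names (Map, FoldByRow) are assumed from Math.NET Numerics 3.x, since the XML tags that carried them were stripped:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] {
        {  1, -2 },
        { -3,  4 }
    });

    var absA = A.Map(x => Math.Abs(x));                       // new matrix of |a_ij|
    double[] rowSums = A.FoldByRow((acc, x) => acc + x, 0.0); // one accumulator per row

    var C = 2.0 * A - absA;                                   // operators delegate to Multiply/Subtract
    var w = A * Vector<double>.Build.DenseOfArray(new double[] { 1, 1 });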
[Continues: static pointwise functions (sqrt, exp, log, log10, sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, abs, floor, ceiling, round); the Cholesky, LU, QR (with a selectable QR method), modified Gram-Schmidt, SVD and EVD factorization methods; Solve overloads on a factorized matrix for both vector and matrix right-hand sides; the iterative Solve overloads that take an iterative solver together with an iterator or stop criteria and an optional preconditioner, writing into a result or returning it; and the conversion helpers between single/double precision and their complex counterparts, including real/imaginary part extraction.]
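The factorization and solve members in a short sketch; Solve on the matrix itself picks a suitable dense factorization internally, while the explicit factorization objects can be cached and reused for several right-hand sides (standard Math.NET Numerics 3.x calls, illustrative data):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new double[,] {
        { 4, 1 },
        { 1, 3 }
    });
    var b = Vector<double>.Build.DenseOfArray(new double[] { 1, 2 });

    Vector<double> x = A.Solve(b);   // one-shot direct solve of A x = b

    var lu = A.LU();                 // factor once ...
    var x1 = lu.Solve(b);            // ... then solve for as many right-hand sides as needed

    var qr  = A.QR();                // optionally pass a QRMethod
    var x2  = qr.Solve(b);

    var svd = A.Svd();               // computeVectors defaults to true
    var s   = svd.S;                 // singular values, largest first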
+      [Math.NET Numerics XML documentation comments for the iterative-solver
+       infrastructure: enums controlling whether existing data must be cleared and
+       whether zero entries may be skipped during enumeration; a symmetricity
+       indicator (unknown, symmetric, hermitian, not symmetric); stop criteria based
+       on a cancellation token, a user-supplied delegate, residual divergence, NaN
+       residuals (failure), the iteration count and the residual size; the
+       IIterationStopCriterion, iterative-solver, solver-setup and preconditioner
+       interfaces; the iteration status; and the Iterator class that aggregates stop
+       criteria, reports the current calculation status, and can be cancelled, reset
+       or cloned.]
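The stop-criterion entries above explain how convergence control is composed from independent criteria and handed to an iterator. A hedged sketch, assuming the criteria are exposed as IterationCountStopCriterion<T>, ResidualStopCriterion<T>, DivergenceStopCriterion<T> and FailureStopCriterion<T> in the Math.NET Numerics 3.x Solvers namespace (these names are not visible in the comments above):

    // Type names assume the Math.NET Numerics 3.x Solvers namespace.
    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    static class StopCriteriaSketch
    {
        static void Main()
        {
            // The solve stops as soon as any criterion reports convergence,
            // divergence, failure (NaN residuals) or hits the iteration cap.
            var iterator = new Iterator<double>(
                new IterationCountStopCriterion<double>(500),
                new ResidualStopCriterion<double>(1e-8),
                new DivergenceStopCriterion<double>(),
                new FailureStopCriterion<double>());

            var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 2.0, 1.0 }, { 1.0, 2.0 } });
            var b = Vector<double>.Build.Dense(new[] { 3.0, 3.0 });
            var x = Vector<double>.Build.Dense(b.Count);

            new TFQMR().Solve(A, b, x, iterator, new UnitPreconditioner<double>());

            // The iterator keeps the final calculation status and can be reset
            // to its pre-calculation state before being reused.
            Console.WriteLine(iterator.Status);
            iterator.Reset();
        }
    }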
+      [Math.NET Numerics XML documentation comments for loading solver-setup objects
+       from assemblies (including the Math.NET Numerics assembly itself), the unit
+       (no-op) preconditioner, and the matrix and vector storage classes: dense
+       storage, diagonal-style storage whose off-diagonal fields are fixed, and
+       compressed sparse row (CSR) storage with row-pointer, column-index and value
+       arrays, where row i holds RowPointers[i+1] - RowPointers[i] stored entries;
+       plus unchecked element access, non-thread-safe setters, array growth, and
+       equality/hash-code support.]
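The storage entries above describe how a sparse matrix keeps only its non-zero values in a compressed sparse row layout. A small, hedged illustration (Matrix<double>.Build.Sparse and SparseMatrix.NonZerosCount are assumed from the Math.NET Numerics 3.x API):

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    static class SparseStorageSketch
    {
        static void Main()
        {
            // Only the explicitly set entries end up in the CSR value array.
            var m = Matrix<double>.Build.Sparse(4, 4);
            m[0, 0] = 2.0;
            m[1, 2] = -1.0;
            m[3, 3] = 5.0;

            // The sparse implementation reports how many values it actually stores.
            Console.WriteLine(((SparseMatrix)m).NonZerosCount);   // 3
        }
    }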
+      [Math.NET Numerics XML documentation comments for sparse and dense vector
+       storage (index and value arrays, the non-zero count, unchecked element access,
+       equality and hashing) and the start of the generic Vector<T> class: the zero
+       and one constants for T, negation, complex conjugation, scalar and vector
+       addition and subtraction, scalar multiplication, and the dot and
+       conjugate-dot products.]
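The Vector<T> entries above cover basic arithmetic plus the dot and outer products. A brief sketch of the corresponding calls (the operator overloads, DotProduct and OuterProduct are assumed from Math.NET Numerics 3.x):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class VectorArithmeticSketch
    {
        static void Main()
        {
            var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
            var v = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

            var sum = u + v;               // element-wise addition
            var scaled = 2.0 * u;          // scalar multiplication
            var dot = u.DotProduct(v);     // sum of u[i] * v[i] = 32
            var outer = u.OuterProduct(v); // 3x3 matrix with M[i, j] = u[i] * v[j]

            Console.WriteLine(dot);
        }
    }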
+      [Math.NET Numerics XML documentation comments for Vector<T> arithmetic and
+       element-wise operations: outer products; scalar and pointwise division;
+       canonical modulus and remainder (scalar and pointwise); pointwise powers,
+       exp/log and the usual trigonometric, hyperbolic, rounding, sign and
+       absolute-value functions, plus atan2 against another vector; helper methods
+       that apply unary or binary functions in place or into a result vector;
+       pointwise minimum/maximum and absolute minimum/maximum against scalars or
+       other vectors; the L1/L2/infinity/p norms and p-norm normalization; and
+       queries for the (absolute) minimum and maximum values and their indices.]
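The pointwise and norm entries above map onto a family of element-wise helpers and norm queries. A hedged sketch (PointwiseMultiply, PointwiseMaximum, the norm methods, AbsoluteMaximumIndex and Normalize are assumed from the Math.NET Numerics 3.x Vector<double> API):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class PointwiseAndNormsSketch
    {
        static void Main()
        {
            var v = Vector<double>.Build.Dense(new[] { 3.0, -4.0, 0.0 });
            var w = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 5.0 });

            var prod = v.PointwiseMultiply(w);      // { 3, -8, 0 }
            var clipped = v.PointwiseMaximum(0.0);  // { 3, 0, 0 }, element-wise max against a scalar

            Console.WriteLine(v.L1Norm());               // 7  (sum of absolute values)
            Console.WriteLine(v.L2Norm());               // 5  (Euclidean norm)
            Console.WriteLine(v.InfinityNorm());         // 4  (largest absolute value)
            Console.WriteLine(v.AbsoluteMaximumIndex()); // 1

            var unit = v.Normalize(2.0);                 // v scaled to unit Euclidean length
        }
    }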
+      [Math.NET Numerics XML documentation comments for the remaining Vector<T>
+       infrastructure: element sums, equality and hash codes, enumerators and the
+       ToString/summary formatting helpers, the constructor, storage and length
+       properties, indexers with and without range checking, clearing and
+       zero-coercing values, copying to arrays, sub-vectors and single-row or
+       single-column matrices, enumeration of values (optionally skipping zeros)
+       with or without indices, the Map/Map2/fold-style functional helpers, the
+       Find/Exists/ForAll predicate helpers, and the static operator-style methods
+       for addition, subtraction, multiplication, dot products, division, pointwise
+       division and modulus/remainder.]
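The functional helpers above (Map, its indexed variant, enumeration and the row/column matrix views) are illustrated below; the member names are assumed from Math.NET Numerics 3.x, since the comments do not spell them out:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class VectorFunctionalSketch
    {
        static void Main()
        {
            var v = Vector<double>.Build.Dense(new[] { 1.0, 4.0, 9.0 });

            // Map applies a function to every element and returns a new vector.
            var roots = v.Map(x => Math.Sqrt(x));               // { 1, 2, 3 }

            // MapIndexed also passes the zero-based index of each element.
            var weighted = v.MapIndexed((i, x) => (i + 1) * x); // { 1, 8, 27 }

            // A vector can be viewed as a single-column or single-row matrix.
            Matrix<double> col = v.ToColumnMatrix();            // 3 x 1
            Matrix<double> row = v.ToRowMatrix();               // 1 x 3

            // Indexed enumeration yields (index, value) pairs; reductions are built in.
            foreach (var pair in v.EnumerateIndexed())
            {
                Console.WriteLine("v[{0}] = {1}", pair.Item1, pair.Item2);
            }
            Console.WriteLine(v.Sum());                         // 14
        }
    }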
+ + + + Computes the sqrt of a vector pointwise + + The input vector + + + + + Computes the exponential of a vector pointwise + + The input vector + + + + + Computes the log of a vector pointwise + + The input vector + + + + + Computes the log10 of a vector pointwise + + The input vector + + + + + Computes the sin of a vector pointwise + + The input vector + + + + + Computes the cos of a vector pointwise + + The input vector + + + + + Computes the tan of a vector pointwise + + The input vector + + + + + Computes the asin of a vector pointwise + + The input vector + + + + + Computes the acos of a vector pointwise + + The input vector + + + + + Computes the atan of a vector pointwise + + The input vector + + + + + Computes the sinh of a vector pointwise + + The input vector + + + + + Computes the cosh of a vector pointwise + + The input vector + + + + + Computes the tanh of a vector pointwise + + The input vector + + + + + Computes the absolute value of a vector pointwise + + The input vector + + + + + Computes the floor of a vector pointwise + + The input vector + + + + + Computes the ceiling of a vector pointwise + + The input vector + + + + + Computes the rounded value of a vector pointwise + + The input vector + + + + + Converts a vector to single precision. + + + + + Converts a vector to double precision. + + + + + Converts a vector to single precision complex numbers. + + + + + Converts a vector to double precision complex numbers. + + + + + Gets a single precision complex vector with the real parts from the given vector. + + + + + Gets a double precision complex vector with the real parts from the given vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response vector Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response matrix Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. 
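The pointwise sqrt/exp/log/trig helpers documented above all apply a scalar function element by element; since the stripped text does not show their exact names, this sketch obtains the same results through Map, the only call it relies on (assumed MathNet.Numerics 3.x API).

using System;
using MathNet.Numerics.LinearAlgebra;

// Sketch only: element-wise transcendental functions expressed via Map.
static class PointwiseSketch
{
    static void Main()
    {
        var v = Vector<double>.Build.DenseOfArray(new[] { 1.0, 4.0, 9.0 });

        var roots = v.Map(Math.Sqrt);        // 1, 2, 3
        var logs = v.Map(Math.Log);          // natural logarithm of each element
        var rounded = v.Map(x => Math.Round(x / 2.0));

        Console.WriteLine(roots);
        Console.WriteLine(logs);
        Console.WriteLine(rounded);
    }
}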
+ + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. 
+ Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor (independent) + Response (dependent) + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor-Response samples as tuples + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response matrix Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Weighted Linear Regression using normal equations. + + List of sample vectors (predictor) together with their response. + List of weights, one for each sample. + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Locally-Weighted Linear Regression using normal equations. + + + + + Locally-Weighted Linear Regression using normal equations. 
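A sketch of the least-squares entry points described above, assuming the MathNet.Numerics 3.x names Fit.Line and MultipleRegression; the normal-equations versus QR choice is exactly the speed/stability trade-off stated in the text.

using System;
using MathNet.Numerics;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearRegression;

// Sketch only: simple line fitting plus a multiple regression solved two ways.
static class RegressionSketch
{
    static void Main()
    {
        // y ~ a + b*x
        double[] xs = { 0.0, 1.0, 2.0, 3.0 };
        double[] ys = { 1.1, 2.9, 5.2, 7.1 };
        var line = Fit.Line(xs, ys);
        Console.WriteLine($"intercept={line.Item1}, slope={line.Item2}");

        // X*beta ~ Y with an explicit predictor matrix (first column = intercept).
        var X = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 }
        });
        var Y = Vector<double>.Build.DenseOfArray(ys);

        var betaNormal = MultipleRegression.NormalEquations(X, Y); // fast, uses Cholesky of X'X
        var betaQr = MultipleRegression.QR(X, Y);                  // more stable, slower
        Console.WriteLine(betaNormal);
        Console.WriteLine(betaQr);
    }
}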
+ + + + + First Order AB method(same as Forward Euler) + + Initial value + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Second Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Third Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Fourth Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + ODE Solver Algorithms + + + + + Second Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Second Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Class to represent a permutation for a subset of the natural numbers. + + + + + Entry _indices[i] represents the location to which i is permuted to. + + + + + Initializes a new instance of the Permutation class. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + + + + Gets the number of elements this permutation is over. + + + + + Computes where permutes too. + + The index to permute from. + The index which is permuted to. + + + + Computes the inverse of the permutation. + + The inverse of the permutation. + + + + Construct an array from a sequence of inversions. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + The set of inversions to construct the permutation from. + A permutation generated from a sequence of inversions. + + + + Construct a sequence of inversions from the permutation. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + A sequence of inversions. + + + + Checks whether the array represents a proper permutation. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + True if represents a proper permutation, false otherwise. + + + + Utilities for working with floating point numbers. + + + + Useful links: + + + http://docs.sun.com/source/806-3568/ncg_goldberg.html#689 - What every computer scientist should know about floating-point arithmetic + + + http://en.wikipedia.org/wiki/Machine_epsilon - Gives the definition of machine epsilon + + + + + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. 
+ The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The relative accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The maximum error in terms of Units in Last Place (ulps), i.e. the maximum number of decimals that may be different. Must be 1 or larger. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. 
+ + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. 
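The larger/smaller comparisons above all share one decision rule: two values count as equal when they differ by less than half of 10^(-decimalPlaces), and only otherwise is the ordinary ordering consulted. A sketch of that rule, using the AlmostEqual helper (assumed name from the MathNet.Numerics Precision class) for the equality test:

using System;
using MathNet.Numerics;

// Sketch only: the half-of-10^(-decimalPlaces) equality window described above.
static class CompareSketch
{
    static void Main()
    {
        const int decimalPlaces = 2;          // equality window: +/- 0.005
        double a = 0.014, b = 0.011;

        bool almostEqual = Precision.AlmostEqual(a, b, decimalPlaces); // true: |a - b| = 0.003 < 0.005
        bool aIsLarger = !almostEqual && a > b;                        // false: they count as equal

        Console.WriteLine($"{almostEqual} {aIsLarger}");
    }
}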
+ + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + The number of binary digits used to represent the binary number for a double precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + The number of binary digits used to represent the binary number for a single precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. 
Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Actual double precision machine epsilon, the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + On a standard machine this is equivalent to `DoublePrecision`. + + + + + Actual double precision machine epsilon, the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + On a standard machine this is equivalent to `PositiveDoublePrecision`. + + + + + The number of significant decimal places of double-precision floating numbers (64 bit). + + + + + The number of significant decimal places of single-precision floating numbers (32 bit). + + + + + Value representing 10 * 2^(-53) = 1.11022302462516E-15 + + + + + Value representing 10 * 2^(-24) = 5.96046447753906E-07 + + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the number divided by it's magnitude, effectively returning a number between -10 and 10. + + The value. + The value of the number. + + + + Returns a 'directional' long value. This is a long value which acts the same as a double, + e.g. a negative double value will return a negative double value starting at 0 and going + more negative as the double value gets more negative. + + The input double value. + A long value which is roughly the equivalent of the double value. + + + + Returns a 'directional' int value. This is a int value which acts the same as a float, + e.g. a negative float value will return a negative int value starting at 0 and going + more negative as the float value gets more negative. + + The input float value. + An int value which is roughly the equivalent of the double value. + + + + Increments a floating point number to the next bigger number representable by the data type. + + The value which needs to be incremented. + How many times the number should be incremented. + + The incrementation step length depends on the provided value. + Increment(double.MaxValue) will return positive infinity. + + The next larger floating point value. + + + + Decrements a floating point number to the next smaller number representable by the data type. + + The value which should be decremented. + How many times the number should be decremented. + + The decrementation step length depends on the provided value. + Decrement(double.MinValue) will return negative infinity. + + The next smaller floating point value. + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + Thrown if is smaller than zero. 
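A sketch of the constants and helpers documented above (epsilon definitions, coercing near-zero noise to zero, stepping to the next representable double); the member names Precision.DoublePrecision, MachineEpsilon, CoerceZero and Increment are assumptions taken from the MathNet.Numerics Precision class.

using System;
using MathNet.Numerics;

// Sketch only: machine-epsilon constants and small floating-point utilities.
static class EpsilonSketch
{
    static void Main()
    {
        Console.WriteLine(Precision.DoublePrecision);   // 2^(-53), Demmel's definition
        Console.WriteLine(Precision.MachineEpsilon);    // measured unit roundoff

        double noisy = 1e-17;
        Console.WriteLine(noisy.CoerceZero(1e-15));     // 0: below the absolute threshold

        double one = 1.0;
        Console.WriteLine(one.Increment() - one);       // assumed helper: gap to the next representable double
    }
}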
+ + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. 
See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. 
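A sketch contrasting the absolute and relative equality overloads documented above (AlmostEqual/AlmostEqualRelative are assumed names from the MathNet.Numerics Precision class); the relative form scales the tolerance by the magnitude of the values, which is what matters for very large or very small numbers.

using System;
using MathNet.Numerics;

// Sketch only: absolute vs. relative tolerance when comparing doubles.
static class AlmostEqualSketch
{
    static void Main()
    {
        double a = 1000.0;
        double b = 1000.0 + 1e-7;

        // Absolute tolerance: |a - b| must not exceed the given error.
        Console.WriteLine(Precision.AlmostEqual(a, b, 1e-6));          // true

        // Relative tolerance: the difference is measured relative to the magnitude of a and b.
        Console.WriteLine(Precision.AlmostEqualRelative(a, b, 1e-12)); // false
    }
}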
+ + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two real numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Checks whether two Compex numbers are almost equal. + + The first number + The second number + true if the two values differ by no more than 10 * 2^(-52); false otherwise. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + + + The values are equal if the difference between the two numbers is smaller than 0.5e-decimalPlaces. We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The number of decimal places. + Thrown if is smaller than zero. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. 
If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not, using the + number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the specified number of decimal places or not. If the numbers + are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two doubles and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + + + Determines the 'number' of floating point numbers between two values (i.e. the number of discrete steps + between the two numbers) and then checks if that is within the specified tolerance. So if a tolerance + of 1 is passed then the result will be true only if the two numbers have the same binary representation + OR if they are two adjacent numbers that only differ by one step. + + + The comparison method used is explained in http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm . 
The article + at http://www.extremeoptimization.com/resources/Articles/FPDotNetConceptsAndFormats.aspx explains how to transform the C code to + .NET enabled code without using pointers and unsafe code. + + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two floats and determines if they are equal to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values between the two values. Must be 1 or larger. + Thrown if is smaller than one. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. 
+ + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The number of decimal places. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two lists of doubles and determines if they are equal within the + specified maximum error. + + The first value list. + The second value list. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two vectors and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two vectors and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two matrices and determines if they are equal to within the specified number + of decimal places or not, using the number of decimal places as an absolute measure. + + The first value. + The second value. + The number of decimal places. + + + + Compares two matrices and determines if they are equal to within the specified number of decimal places or not. + If the numbers are very close to zero an absolute difference is compared, otherwise the relative difference is compared. + + The first value. + The second value. + The number of decimal places. + + + + Support Interface for Precision Operations (like AlmostEquals). + + Type of the implementing class. + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + A norm of this value. + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + The value to compare with. + A norm of the difference between this and the other value. + + + + Consistency vs. performance trade-off between runs on different machines. 
+ + + + Consistent on the same CPU only (maximum performance) + + + Consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility) + + + Consistent on Intel CPUs supporting SSE2 or later + + + Consistent on Intel CPUs supporting SSE4.2 or later + + + Consistent on Intel CPUs supporting AVX or later + + + Consistent on Intel CPUs supporting AVX2 or later + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsFFTProvider" environment variable, + or fall back to the best provider. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + How to transpose a matrix. + + + + + Don't transpose a matrix. + + + + + Transpose a matrix. + + + + + Conjugate transpose a complex matrix. + + If a conjugate transpose is used with a real matrix, then the matrix is just transposed. + + + + Types of matrix norms. + + + + + The 1-norm. + + + + + The Frobenius norm. + + + + + The infinity norm. + + + + + The largest absolute value norm. + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Interface to linear algebra algorithms that work off 1-D arrays. + + Supported data types are Double, Single, Complex, and Complex32. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiply elements of vectors or matrices. + + The array x. 
+ The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. 
On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Computes the full QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the thin QR factorization of A where M > N. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by QR factor. This is only used for the managed provider and can be + null for the native provider. The native provider uses the Q portion stored in the R matrix. + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + On entry the B matrix; on exit the X matrix. + The number of columns of B. + On exit, the solution matrix. + Rows must be greater or equal to columns. + The type of QR factorization to perform. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
+ + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Use the best provider available. + + + + + Use a specific provider if configured, e.g. using the + "MathNetNumericsLAProvider" environment variable, + or fall back to the best provider. + + + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + The managed linear algebra provider. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. 
+ There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. 
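At the provider level the LU routines split into separate factor, solve and inverse steps (GETRF/GETRS/GETRI). A minimal sketch of the same workflow through the Matrix<double> API, which is assumed here to delegate to those routines:

using System;
using MathNet.Numerics.LinearAlgebra;

class LuSolveSketch
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4, 3 },
            { 6, 3 }
        });
        var b = Vector<double>.Build.Dense(new double[] { 10, 12 });

        // Factor once (GETRF-like), then reuse the factorization for solves.
        var lu = a.LU();
        var x = lu.Solve(b); // forward/back substitution (GETRS-like)

        // The matrix inverse uses the same kind of decomposition internally.
        var aInv = a.Inverse();

        Console.WriteLine(x);
        Console.WriteLine(aInv * a); // approximately the identity
    }
}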
+ + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + The B matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. 
+ The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
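The vector kernels above (AXPY-style updates, DOT products, and the element-wise routines with no BLAS equivalent) correspond to ordinary operators and Pointwise* calls on Vector<double>. A short sketch, assuming the MathNet.Numerics 3.x vector API:

using System;
using MathNet.Numerics.LinearAlgebra;

class VectorKernelsSketch
{
    static void Main()
    {
        var x = Vector<double>.Build.Dense(new double[] { 1, 2, 3 });
        var y = Vector<double>.Build.Dense(new double[] { 4, 5, 6 });
        const double alpha = 0.5;

        var axpy = y + alpha * x;            // AXPY-style: result = y + alpha*x
        double dot = x.DotProduct(y);        // DOT-style inner product

        var sum = x + y;                     // point-wise add
        var diff = x - y;                    // point-wise subtract
        var prod = x.PointwiseMultiply(y);   // point-wise multiply
        var quot = x.PointwiseDivide(y);     // point-wise divide

        Console.WriteLine($"{axpy}\n{dot}\n{sum}\n{diff}\n{prod}\n{quot}");
    }
}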
+ + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + The requested of the matrix. + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. 
+ + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. 
+ The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Try to find out whether the provider is available, at least in principle. + Verification may still fail if available, but it will certainly fail if unavailable. + + + + + Initialize and verify that the provided is indeed available. If not, fall back to alternatives like the managed provider + + + + + Assumes that and have already been transposed. + + + + + Assumes that and have already been transposed. + + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. + + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. 
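The availability check and fall-back behaviour described above is exposed through the Control class: a native provider can be tried first and the managed provider used when it cannot be loaded. A minimal sketch, assuming the MathNet.Numerics 3.x Control.TryUseNativeMKL/UseManaged methods:

using System;
using MathNet.Numerics;

class ProviderFallbackSketch
{
    static void Main()
    {
        // Try the native MKL provider; if the native binaries are missing or
        // cannot be verified, fall back to the managed linear algebra provider.
        if (!Control.TryUseNativeMKL())
        {
            Control.UseManaged();
        }

        Console.WriteLine(Control.LinearAlgebraProvider);
    }
}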
+ + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. + The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. 
Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. 
+ + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Adds a scaled vector to another: result = y + alpha*x. + + The vector to update. + The value to scale by. + The vector to add to . + The result of the addition. + This is similar to the AXPY BLAS routine. + + + + Scales an array. Can be used to scale a vector and a matrix. + + The scalar. + The values to scale. + This result of the scaling. + This is similar to the SCAL BLAS routine. + + + + Conjugates an array. Can be used to conjugate a vector and a matrix. + + The values to conjugate. + This result of the conjugation. + + + + Computes the dot product of x and y. 
+ + The vector x. + The vector y. + The dot product of x and y. + This is equivalent to the DOT BLAS routine. + + + + Does a point wise add of two arrays z = x + y. This can be used + to add vectors or matrices. + + The array x. + The array y. + The result of the addition. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise subtraction of two arrays z = x - y. This can be used + to subtract vectors or matrices. + + The array x. + The array y. + The result of the subtraction. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise multiplication of two arrays z = x * y. This can be used + to multiple elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise multiplication. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise division of two arrays z = x / y. This can be used + to divide elements of vectors or matrices. + + The array x. + The array y. + The result of the point wise division. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Does a point wise power of two arrays z = x ^ y. This can be used + to raise elements of vectors or matrices to the powers of another vector or matrix. + + The array x. + The array y. + The result of the point wise power. + There is no equivalent BLAS routine, but many libraries + provide optimized (parallel and/or vectorized) versions of this + routine. + + + + Computes the requested of the matrix. + + The type of norm to compute. + The number of rows. + The number of columns. + The matrix to compute the norm from. + + The requested of the matrix. + + + + + Multiples two matrices. result = x * y + + The x matrix. + The number of rows in the x matrix. + The number of columns in the x matrix. + The y matrix. + The number of rows in the y matrix. + The number of columns in the y matrix. + Where to store the result of the multiplication. + This is a simplified version of the BLAS GEMM routine with alpha + set to 1.0 and beta set to 0.0, and x and y are not transposed. + + + + Multiplies two matrices and updates another with the result. c = alpha*op(a)*op(b) + beta*c + + How to transpose the matrix. + How to transpose the matrix. + The value to scale matrix. + The a matrix. + The number of rows in the matrix. + The number of columns in the matrix. + The b matrix + The number of rows in the matrix. + The number of columns in the matrix. + The value to scale the matrix. + The c matrix. + + + + Cache-Oblivious Matrix Multiplication + + if set to true transpose matrix A. + if set to true transpose matrix B. + The value to scale the matrix A with. + The matrix A. + Row-shift of the left matrix + Column-shift of the left matrix + The matrix B. + Row-shift of the right matrix + Column-shift of the right matrix + The matrix C. + Row-shift of the result matrix + Column-shift of the result matrix + The number of rows of matrix op(A) and of the matrix C. + The number of columns of matrix op(B) and of the matrix C. + The number of columns of matrix op(A) and the rows of the matrix op(B). + The constant number of rows of matrix op(A) and of the matrix C. + The constant number of columns of matrix op(B) and of the matrix C. 
+ The constant number of columns of matrix op(A) and the rows of the matrix op(B). + Indicates if this is the first recursion. + + + + Computes the LUP factorization of A. P*A = L*U. + + An by matrix. The matrix is overwritten with the + the LU factorization on exit. The lower triangular factor L is stored in under the diagonal of (the diagonal is always 1.0 + for the L factor). The upper triangular factor U is stored on and above the diagonal of . + The order of the square matrix . + On exit, it contains the pivot indices. The size of the array must be . + This is equivalent to the GETRF LAPACK routine. + + + + Computes the inverse of matrix using LU factorization. + + The N by N matrix to invert. Contains the inverse On exit. + The order of the square matrix . + This is equivalent to the GETRF and GETRI LAPACK routines. + + + + Computes the inverse of a previously factored matrix. + + The LU factored N by N matrix. Contains the inverse On exit. + The order of the square matrix . + The pivot indices of . + This is equivalent to the GETRI LAPACK routine. + + + + Solves A*X=B for X using LU factorization. + + The number of columns of B. + The square matrix A. + The order of the square matrix . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRF and GETRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The number of columns of B. + The factored A matrix. + The order of the square matrix . + The pivot indices of . + On entry the B matrix; on exit the X matrix. + This is equivalent to the GETRS LAPACK routine. + + + + Computes the Cholesky factorization of A. + + On entry, a square, positive definite matrix. On exit, the matrix is overwritten with the + the Cholesky factorization. + The number of rows or columns in the matrix. + This is equivalent to the POTRF LAPACK routine. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves A*X=B for X using Cholesky factorization. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRF add POTRS LAPACK routines. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The number of columns in the B matrix. + This is equivalent to the POTRS LAPACK routine. + + + + Solves A*X=B for X using a previously factored A matrix. + + The square, positive definite matrix A. Has to be different than . + The number of rows and columns in A. + On entry the B matrix; on exit the X matrix. + The column to solve for. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the R matrix of the QR factorization. + The number of rows in the A matrix. + The number of columns in the A matrix. + On exit, A M by M matrix that holds the Q matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Computes the QR factorization of A. + + On entry, it is the M by N A matrix to factor. On exit, + it is overwritten with the Q matrix of the QR factorization. + The number of rows in the A matrix. 
+ The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. 
The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Random number generator using Mersenne Twister 19937 algorithm. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. 
+ + The seed value. + Uses the value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A 32-bit combined multiple recursive generator with 2 components of order 3. + + Based off of P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research, 44, 5 (1996), 816--822. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Represents a Parallel Additive Lagged Fibonacci pseudo-random number generator. + + + The type bases upon the implementation in the + Boost Random Number Library. + It uses the modulus 232 and by default the "lags" 418 and 1279. Some popular pairs are presented on + Wikipedia - Lagged Fibonacci generator. + + + + + Default value for the ShortLag + + + + + Default value for the LongLag + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. 
Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The ShortLag value + TheLongLag value + + + + Gets the short lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Gets the long lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Stores an array of random numbers + + + + + Stores an index for the random number array element that will be accessed next. + + + + + Fills the array with new unsigned random numbers. + + + Generated random numbers are 32-bit unsigned integers greater than or equal to 0 + and less than or equal to . + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + This class implements extension methods for the System.Random class. The extension methods generate + pseudo-random distributed numbers for types other than double and int32. + + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random bytes. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers greater than or equal to zero and less than . + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers within the specified range. + + The random number generator. + The array to fill with random values. + Lower bound, inclusive. + Upper bound, exclusive. 
+ + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative random number less than . + + The random number generator. + + A 64-bit signed integer greater than or equal to 0, and less than ; that is, + the range of return values includes 0 but not . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int32 range. + + The random number generator. + + A 32-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int64 range. + + The random number generator. + + A 64-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative decimal floating point random number less than 1.0. + + The random number generator. + + A decimal floating point number greater than or equal to 0.0, and less than 1.0; that is, + the range of return values includes 0.0 but not 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random boolean. + + The random number generator. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Provides a time-dependent seed value, matching the default behavior of System.Random. + WARNING: There is no randomness in this seed and quick repeated calls can cause + the same seed value. Do not use for cryptography! + + + + + Provides a seed based on time and unique GUIDs. + WARNING: There is only low randomness in this seed, but at least quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Provides a seed based on an internal random number generator (crypto if available), time and unique GUIDs. + WARNING: There is only medium randomness in this seed, but quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Base class for random number generators. This class introduces a layer between + and the Math.Net Numerics random number generators to provide thread safety. + When used directly it use the System.Random as random number source. + + + + + Initializes a new instance of the class using + the value of to set whether + the instance is thread safe or not. + + + + + Initializes a new instance of the class. + + if set to true , the class is thread safe. + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The array to fill with random values. 
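The seed helpers and the extension methods for types other than double and Int32 combine naturally: seed a generator robustly once, then draw values of the required type. A short sketch, assuming the RandomSeed.Robust helper and the extension method names (NextDoubles, NextInt64, NextDecimal, NextBoolean) suggested by the descriptions above:

using System;
using MathNet.Numerics.Random;

class RandomExtensionsSketch
{
    static void Main()
    {
        // Robust() mixes a crypto source (when available), time and GUIDs,
        // so quick repeated calls still yield different seeds.
        var rng = new MersenneTwister(RandomSeed.Robust(), true); // true: thread safe

        double u = rng.NextDouble();           // uniform in [0.0, 1.0)
        double[] bulk = rng.NextDoubles(100);  // array of uniform doubles

        // Extension methods for other types (names assumed from the docs above).
        long big = rng.NextInt64();
        decimal dec = rng.NextDecimal();
        bool coin = rng.NextBoolean();

        Console.WriteLine($"{u} {bulk.Length} {big} {dec} {coin}");
    }
}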
+ + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The size of the array to fill. + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than . + + + + + Returns a random number less then a specified maximum. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + A 32-bit signed integer less than . + is zero or negative. + + + + Returns a random number within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + A 32-bit signed integer greater than or equal to and less than ; that is, the range of return values includes but not . If equals , is returned. + + is greater than . + + + + Fills an array with random 32-bit signed integers greater than or equal to zero and less than . + + The array to fill with random values. + + + + Returns an array with random 32-bit signed integers greater than or equal to zero and less than . + + The size of the array to fill. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an infinite sequence of random 32-bit signed integers greater than or equal to zero and less than . + + + + + Returns an infinite sequence of random numbers within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Fills the elements of a specified array of bytes with random numbers. + + An array of bytes to contain random numbers. + is null. + + + + Returns a random number between 0.0 and 1.0. + + A double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than 2147483647 (). + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random N-bit signed integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 32 (not verified). + + + + + Returns a random N-bit signed long integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 64 (not verified). 
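The RandomSource entries above describe the shared surface of the Math.NET generators: seeded construction, an optional thread-safety flag, and bulk sampling into arrays or sequences. A minimal usage sketch, assuming the MathNet.Numerics.Random namespace of the referenced MathNet.Numerics package and the SystemRandomSource generator documented further below (class and member names are the library's usual ones; the stripped doc text above no longer shows them explicitly):

```csharp
using MathNet.Numerics.Random;

class RandomSourceSketch
{
    static void Main()
    {
        // Seed from the "crypto if available, time and GUIDs" provider described above.
        int seed = RandomSeed.Robust();

        // Any RandomSource-derived generator; the second argument requests thread safety.
        RandomSource rng = new SystemRandomSource(seed, true);

        double u = rng.NextDouble();   // uniform in [0.0, 1.0)
        int k = rng.Next(10, 20);      // integer in [10, 20)

        // Bulk sampling: fill an existing buffer, or ask for a freshly allocated array.
        var buffer = new double[1000];
        rng.NextDoubles(buffer);
        double[] more = rng.NextDoubles(1000);
    }
}
```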
+ + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + + + + Construct a new random number generator with random seed. + + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The seed value. + + + + Construct a new random number generator with random seed. + + The seed value. + if set to true , the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fill an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 1982 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: + An efficient and portable pseudo-random number generator". Applied Statistics 31 (1982) 188-190 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. 
+ + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 2006 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers". + Computational Statistics & Data Analysis 51:3 (2006) 1614-1622 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Implements a multiply-with-carry Xorshift pseudo random number generator (RNG) specified in Marsaglia, George. (2003). Xorshift RNGs. + Xn = a * Xn−3 + c mod 2^32 + http://www.jstatsoft.org/v08/i14/paper + + + + + The default value for X1. + + + + + The default value for X2. + + + + + The default value for the multiplier. + + + + + The default value for the carry over. + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Seed or last but three unsigned random number. + + + + + Last but two unsigned random number. + + + + + Last but one unsigned random number. + + + + + The value of the carry over. + + + + + The multiplier. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. 
+ Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Note: must be less than . + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Bisection root-finding algorithm. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. 
+ The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy for both the root and the function value at the root. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Algorithm by by Brent, Van Wijngaarden, Dekker et al. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Helper method useful for preventing rounding errors. + a*sign(b) + + + + Algorithm by Broyden. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. 
+ The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Helper method to calculate an approximation of the Jacobian. + + The function. + The argument (initial guess). + The result (of initial guess). + + + + Finds roots to the cubic equation x^3 + a2*x^2 + a1*x + a0 = 0 + Implements the cubic formula in http://mathworld.wolfram.com/CubicFormula.html + + + + + Q and R are transformed variables. + + + + + n^(1/3) - work around a negative double raised to (1/3) + + + + + Find all real-valued roots of the cubic equation a0 + a1*x + a2*x^2 + x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Pure Newton-Raphson root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Robust Newton-Raphson root-finding algorithm that falls back to bisection when overshooting or converging too slow, or to subdivision on lacking bracketing. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. 
+ Maximum number of iterations. Default 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Default 20. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Example: 20. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Pure Secant root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false + + + Detect a range containing at least one root. + The function to detect roots from. + Lower value of the range. + Upper value of the range + The growing factor of research. Usually 1.6. + Maximum number of iterations. Usually 50. + True if the bracketing operation succeeded, false otherwise. + This iterative methods stops when two values with opposite signs are found. + + + + Sorting algorithms for single, tuple and triple lists. + + + + + Sort a list of keys, in place using the quick sort algorithm using the quick sort algorithm. + + The type of elements in the key list. + List to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. 
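The root-finding entries above (Bisection, Brent, Newton-Raphson, Secant) share a FindRoot/TryFindRoot pattern: supply the function, a bracketing interval or initial guess (plus the first derivative for the Newton-type methods), a desired accuracy and an iteration limit. A minimal sketch, assuming the MathNet.Numerics.RootFinding namespace of the referenced package:

```csharp
using System;
using MathNet.Numerics.RootFinding;

class RootFindingSketch
{
    static void Main()
    {
        // f(x) = x^3 - 2 has a single real root at 2^(1/3), roughly 1.2599.
        Func<double, double> f = x => x * x * x - 2.0;
        Func<double, double> df = x => 3.0 * x * x;

        // Bracketing methods: pass a range expected to contain the root.
        double byBrent = Brent.FindRoot(f, 0.0, 2.0, 1e-10, 100);
        double byBisection = Bisection.FindRoot(f, 0.0, 2.0, 1e-10, 100);

        // Newton-Raphson also needs the derivative and, as noted above,
        // aborts if the iteration leaves the given interval.
        double byNewton = NewtonRaphson.FindRoot(f, df, 0.0, 2.0, 1e-10, 100);

        // TryFindRoot variants report failure instead of throwing.
        double root;
        bool found = Brent.TryFindRoot(f, 0.0, 2.0, 1e-10, 100, out root);

        Console.WriteLine($"{byBrent} {byBisection} {byNewton} {found} {root}");
    }
}
```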
+ The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a range of a list of keys, in place using the quick sort algorithm. + + The type of element in the list. + List to sort. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the primary list. + The type of elements in the secondary list. + List to sort. + List to sort on duplicate primary items, and permute the same way as the key list. + Comparison, defining the primary sort order. + Comparison, defining the secondary sort order. + + + + Recursive implementation for an in place quick sort on a list. + + The type of the list on which the quick sort is performed. + The list which is sorted using quick sort. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on a list while reordering one other list accordingly. + + The type of the list on which the quick sort is performed. + The type of the list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on one list while reordering two other lists accordingly. + + The type of the list on which the quick sort is performed. + The type of the first list which is automatically reordered accordingly. + The type of the second list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The first list which is automatically reordered accordingly. + The second list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on the primary and then by the secondary list while reordering one secondary list accordingly. + + The type of the primary list. + The type of the secondary list. + The list which is sorted using quick sort. 
+ The list which is sorted secondarily (on primary duplicates) and automatically reordered accordingly. + The method with which to compare two elements of the primary list. + The method with which to compare two elements of the secondary list. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Performs an in place swap of two elements in a list. + + The type of elements stored in the list. + The list in which the elements are stored. + The index of the first element of the swap. + The index of the second element of the swap. + + + + This partial implementation of the SpecialFunctions class contains all methods related to the error function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the harmonic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the logistic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + + + Computes the logarithm of the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The logarithm of the Euler Beta function evaluated at z,w. + If or are not positive. + + + + Computes the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The Euler Beta function evaluated at z,w. + If or are not positive. + + + + Returns the lower incomplete (unregularized) beta function + B(a,b,x) = int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The lower incomplete (unregularized) beta function. + + + + Returns the regularized lower incomplete beta function + I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The regularized lower incomplete beta function. + + + + ************************************** + COEFFICIENTS FOR METHOD ErfImp * + ************************************** + + Polynomial coefficients for a numerator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for adenominator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. 
+ + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + + ************************************** + COEFFICIENTS FOR METHOD ErfInvImp * + ************************************** + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. 
+ + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. + returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! 
using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. 
+ The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of first kind, + order 1 of the argument. +

+ The function is defined as i1(x) = -i j1( ix ). +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the modified Bessel function of the second kind + of order 0 of the argument. +

+ The range is partitioned into the two intervals [0, 8] and + (8, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 0 of the argument. + + The value to compute the bessel function of. + + + + Returns the modified Bessel function of the second kind + of order 1 of the argument. +

+ The range is partitioned into the two intervals [0, 2] and + (2, infinity). Chebyshev polynomial expansions are employed + in each interval. +

+ The value to compute the bessel function of. + +
+ + Returns the exponentially scaled modified Bessel function + of the second kind of order 1 of the argument. +

+ k1e(x) = exp(x) * k1(x). +

+ The value to compute the bessel function of. + +
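A short sketch exercising a few of the special functions documented above (error function, gamma family, modified Bessel functions). It assumes the static SpecialFunctions class in the MathNet.Numerics namespace with its usual member names (Erf, Gamma, BesselI0, BesselK1e, ...), which the stripped doc text above no longer spells out:

```csharp
using System;
using MathNet.Numerics;

class SpecialFunctionsSketch
{
    static void Main()
    {
        // Error function, its complement and its inverse.
        double e = SpecialFunctions.Erf(1.0);     // about 0.8427
        double ec = SpecialFunctions.Erfc(1.0);   // about 0.1573
        double z = SpecialFunctions.ErfInv(e);    // recovers about 1.0

        // Gamma family: Gamma(5) = 4! = 24, plus the regularized lower incomplete form P(a, x).
        double g = SpecialFunctions.Gamma(5.0);
        double p = SpecialFunctions.GammaLowerRegularized(2.0, 1.5);

        // Modified Bessel functions: first kind of order 0, and the
        // exponentially scaled second kind of order 1 (k1e(x) = exp(x) * k1(x)).
        double i0 = SpecialFunctions.BesselI0(1.0);
        double k1e = SpecialFunctions.BesselK1e(1.0);

        Console.WriteLine($"{e} {ec} {z} {g} {p} {i0} {k1e}");
    }
}
```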
+ + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = sum(coef[i] * T_i(x/2), i = 0 .. N-1)
+            
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must + have been transformed to x -> 2(2x - b - a)/(b-a) before + entering the routine. This maps x from (a, b) to (-1, 1), + over which the Chebyshev polynomials are defined. +

+ If the coefficients are for the inverted interval, in + which (a, b) is mapped to (1/b, 1/a), the transformation + required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + this becomes x -> 4a/x - 1. +

+ SPEED: +

+ Taking advantage of the recurrence properties of the + Chebyshev polynomials, the routine requires one more + addition per loop than evaluating a nested polynomial of + the same degree. +

+ The coefficients of the polynomial. + Argument to the polynomial. + + Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs +

+ Marked as Deprecated in + http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html + + + +

+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification. + + The no. of terms in the sequence. + The coefficients of the Chebyshev series, length n+1. + The value at which the series is to be evaluated. + + ORIGINAL AUTHOR: + Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics, University of Paisley; High St., PAISLEY, SCOTLAND + REFERENCES: + "An error analysis of the modified Clenshaw method for evaluating Chebyshev and Fourier series" + J. Oliver, J.I.M.A., vol. 20, 1977, pp379-391 + +
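The polynomial-evaluation entries above use coefficients ordered by ascending power (the coefficient for power k sits at index k), so [3, -1, 2] represents y = 2x^2 - x + 3; the Chebyshev routines, by contrast, store their coefficients in reverse order. A small sketch restating the ascending-power convention with Horner's scheme (an illustrative local helper, not the library's own code):

```csharp
using System;

static class PolynomialConventionSketch
{
    // Evaluates the sum over k of coefficients[k] * x^k, with coefficients ordered
    // by ascending power as documented above, using Horner's scheme.
    static double EvaluatePolynomial(double x, params double[] coefficients)
    {
        double y = 0.0;
        for (int k = coefficients.Length - 1; k >= 0; k--)
        {
            y = y * x + coefficients[k];
        }
        return y;
    }

    static void Main()
    {
        // [3, -1, 2] is y = 2x^2 - x + 3; at x = 2 this gives 2*4 - 2 + 3 = 9.
        Console.WriteLine(EvaluatePolynomial(2.0, 3.0, -1.0, 2.0));  // 9
    }
}
```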
+ + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. 
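The optimization test functions documented just above come with closed forms and known minima. A small sketch restating two of them as local helpers (hypothetical names, not the library's API), which makes the stated minima easy to verify:

```csharp
using System;

static class TestFunctionSketch
{
    // Rosenbrock valley: (x, y) -> (1-x)^2 + 100*(y-x^2)^2, global minimum f(1, 1) = 0.
    static double Rosenbrock(double x, double y)
        => (1 - x) * (1 - x) + 100 * (y - x * x) * (y - x * x);

    // Himmelblau: (x, y) -> (x^2+y-11)^2 + (x+y^2-7)^2, four global minima with value 0.
    static double Himmelblau(double x, double y)
        => Math.Pow(x * x + y - 11, 2) + Math.Pow(x + y * y - 7, 2);

    static void Main()
    {
        Console.WriteLine(Rosenbrock(1.0, 1.0));  // 0
        Console.WriteLine(Himmelblau(3.0, 2.0));  // 0 (one of the four minima)
    }
}
```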
+ Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. 
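The order-statistic, median and quantile routines above carry an Inplace suffix because they may reorder the data array. A hedged sketch of how that is typically handled (copy first if the original order matters); the names ArrayStatistics.MedianInplace and QuantileInplace are assumed from the descriptions above.

    using System;
    using MathNet.Numerics.Statistics;

    class InplaceQuantileSketch
    {
        static void Main()
        {
            double[] data = { 9.0, 1.0, 7.0, 3.0, 5.0 };

            // The *Inplace methods may reorder the array, so work on a copy
            // when the original ordering must be preserved.
            double[] work = (double[])data.Clone();

            Console.WriteLine(ArrayStatistics.MedianInplace(work));         // 5
            Console.WriteLine(ArrayStatistics.QuantileInplace(work, 0.25)); // R-8 estimate of the lower quartile
        }
    }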
+ + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. 
+ On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. 
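A small sketch of the correlation measures documented above, assuming they are exposed as Correlation.Pearson and Correlation.Spearman in MathNet.Numerics.Statistics (the stripped file omits the member names).

    using System;
    using MathNet.Numerics.Statistics;

    class CorrelationSketch
    {
        static void Main()
        {
            double[] x = { 1.0, 2.0, 3.0, 4.0, 5.0 };
            double[] y = { 2.1, 3.9, 6.2, 8.0, 9.9 };

            Console.WriteLine(Correlation.Pearson(x, y));  // product-moment coefficient, close to 1 here
            Console.WriteLine(Correlation.Spearman(x, y)); // rank-based coefficient, exactly 1 for any monotone relation
        }
    }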
+ + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. 
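A usage sketch of the DescriptiveStatistics class described above: one pass over the sample yields count, mean, standard deviation (N-1 normalizer), skewness, kurtosis and the extrema. The constructor overloads and the increased-accuracy flag are not exercised here.

    using System;
    using MathNet.Numerics.Statistics;

    class DescriptiveStatisticsSketch
    {
        static void Main()
        {
            double[] samples = { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };

            var stats = new DescriptiveStatistics(samples);
            Console.WriteLine(stats.Count);             // 8
            Console.WriteLine(stats.Mean);              // 5
            Console.WriteLine(stats.StandardDeviation); // sample estimator, N-1 normalizer
            Console.WriteLine(stats.Skewness);          // zero when Count is less than three
            Console.WriteLine(stats.Minimum);           // 2
            Console.WriteLine(stats.Maximum);           // 9
        }
    }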
+ + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. + + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. 
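A sketch of the Histogram usage described above: equally sized buckets between the smallest and largest datapoint, with bounds that adapt when out-of-range data is added. The indexer and property names follow the descriptions; anything not named in this file is an assumption.

    using System;
    using MathNet.Numerics.Statistics;

    class HistogramSketch
    {
        static void Main()
        {
            double[] data = { 0.2, 0.4, 1.1, 1.9, 2.5, 2.6, 3.7, 3.8, 3.9 };

            // Four equally sized buckets spanning the smallest and largest datapoint.
            var histogram = new Histogram(data, 4);

            for (int i = 0; i < histogram.BucketCount; i++)
            {
                var bucket = histogram[i];
                Console.WriteLine($"({bucket.LowerBound}, {bucket.UpperBound}] : {bucket.Count}");
            }

            // A datapoint outside the current range makes the bounds adapt automatically.
            histogram.AddData(10.0);
        }
    }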
+ + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. + When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. 
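The default gradient above is described as "a simple three point estimation". The library's exact estimator is not reproduced in this file; the sketch below shows the standard central-difference rule that this usually refers to, applied to a log density.

    using System;

    class ThreePointDerivativeSketch
    {
        // Central (three-point) difference: f'(x) ~ (f(x + h) - f(x - h)) / (2h).
        static double Derivative(Func<double, double> f, double x, double h = 1e-5)
            => (f(x + h) - f(x - h)) / (2 * h);

        static void Main()
        {
            // d/dx of -x^2/2 (standard normal log density up to a constant) is -x.
            Console.WriteLine(Derivative(x => -0.5 * x * x, 1.5)); // approximately -1.5
        }
    }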
+ + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. + + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. 
+ The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. + + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. 
+ The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. 
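To make the accept/reject rule described above concrete, here is a minimal, self-contained random-walk Metropolis loop for a symmetric proposal with densities kept in log space. It deliberately does not use the library's sampler types, whose exact constructor signatures are not shown in this file.

    using System;

    class MetropolisSketch
    {
        static void Main()
        {
            var rng = new Random(42);
            Func<double, double> logDensity = x => -0.5 * x * x; // standard normal, up to a constant

            double current = 0.0;
            double logPCurrent = logDensity(current);
            int accepted = 0, steps = 10000;

            for (int i = 0; i < steps; i++)
            {
                // Symmetric proposal: a small uniform step around the current location.
                double proposal = current + (rng.NextDouble() - 0.5);
                double logPProposal = logDensity(proposal);

                // Accept with probability min(1, P(proposal)/P(current)), evaluated in log space.
                if (Math.Log(rng.NextDouble()) < logPProposal - logPCurrent)
                {
                    current = proposal;
                    logPCurrent = logPProposal;
                    accepted++;
                }
            }

            Console.WriteLine($"acceptance rate: {(double)accepted / steps:F2}");
        }
    }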
+ The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. 
+ The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. 
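The slice-sampler constructor parameters listed above (initial sample, log density, burn interval, scale factor) suggest usage along the following lines. This is only a sketch: the concrete type and namespace (assumed here to be MathNet.Numerics.Statistics.Mcmc.UnivariateSliceSampler) and the parameter order are assumptions, not confirmed by this file.

    using System;
    using MathNet.Numerics.Statistics.Mcmc;

    class SliceSamplerSketch
    {
        static void Main()
        {
            // Log density of a standard normal, up to an additive constant.
            double LogDensity(double x) => -0.5 * x * x;

            // Assumed parameter order: initial sample, log density, burn interval, scale factor.
            var sampler = new UnivariateSliceSampler(0.0, LogDensity, 5, 1.0);

            double[] samples = sampler.Sample(1000);
            Console.WriteLine(samples.Length);
        }
    }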
+ + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. 
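A brief sketch of the running-statistics accumulator described above: values are pushed incrementally, and two accumulators can be combined without revisiting the raw data. The Push/PushRange/Combine names follow the descriptions; anything beyond them is assumed.

    using System;
    using MathNet.Numerics.Statistics;

    class RunningStatisticsSketch
    {
        static void Main()
        {
            var left = new RunningStatistics();
            left.PushRange(new[] { 1.0, 2.0, 3.0 });

            var right = new RunningStatistics();
            right.Push(4.0);
            right.Push(5.0);

            // Accumulators can be merged without re-reading the underlying samples.
            var combined = RunningStatistics.Combine(left, right);
            Console.WriteLine(combined.Count);             // 5
            Console.WriteLine(combined.Mean);              // 3
            Console.WriteLine(combined.StandardDeviation); // sample estimator, N-1 normalizer
        }
    }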
+ + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
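A usage sketch for the sorted-array statistics documented above, assuming they live on MathNet.Numerics.Statistics.SortedArrayStatistics; note that the caller is responsible for passing data that is already sorted ascendingly.

    using System;
    using MathNet.Numerics.Statistics;

    class SortedArrayStatisticsSketch
    {
        static void Main()
        {
            // Must already be sorted ascendingly; no check is performed.
            double[] sorted = { 1.0, 3.0, 5.0, 7.0, 9.0 };

            Console.WriteLine(SortedArrayStatistics.Median(sorted));            // 5
            Console.WriteLine(SortedArrayStatistics.Quantile(sorted, 0.75));    // R-8 estimate of the upper quartile
            Console.WriteLine(SortedArrayStatistics.EmpiricalCDF(sorted, 5.0)); // fraction of the data <= 5
        }
    }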
+ + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. 
+ Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. 
+ Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + The full population data. + + + + Evaluates the skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + The full population data. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the unbiased population skewness and kurtosis from the provided samples in a single pass. + Uses a normalizer (Bessel's correction; type 2). + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness and kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + + The full population data. 
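The variance, standard-deviation, skewness and kurtosis summaries above repeatedly distinguish an N-1 normalizer (Bessel's correction) for samples from an N normalizer for full populations. As a minimal, self-contained sketch of that distinction only — not Math.NET's implementation, and the class/method names below are illustrative:

```csharp
using System;
using System.Linq;

static class VarianceSketch
{
    // Unbiased sample variance: divides by (n - 1), per Bessel's correction.
    // Returns NaN for fewer than two entries, mirroring the behaviour described above.
    public static double SampleVariance(double[] data)
    {
        if (data.Length < 2) return double.NaN;
        double mean = data.Average();
        double sumSq = data.Sum(x => (x - mean) * (x - mean));
        return sumSq / (data.Length - 1);
    }

    // Population variance: divides by n, and is therefore biased if applied to a subset.
    public static double PopulationVariance(double[] data)
    {
        if (data.Length == 0) return double.NaN;
        double mean = data.Average();
        double sumSq = data.Sum(x => (x - mean) * (x - mean));
        return sumSq / data.Length;
    }
}
```

The library exposes the same pair of conventions through the sample and population estimators documented above; which overload you call determines which normalizer is applied.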
+ + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + The full population data. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. 
+ The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. 
+ Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + Null-entries are ignored. + + The data sample sequence. + + + + Evaluates the sample mean over a moving window, for each samples. + Returns NaN if no data is empty or if any entry is NaN. + + The sample stream to calculate the mean of. + The number of last samples to consider. + + + + Statistics operating on an IEnumerable in a single pass, without keeping the full data in memory. + Can be used in a streaming way, e.g. on large datasets not fitting into memory. + + + + + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. 
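The streaming estimators summarized above are documented as working "in a single pass without memoization". One standard way to compute mean and unbiased variance under that constraint is Welford's online recurrence; the sketch below illustrates the idea under that assumption and is not claimed to be the library's actual implementation:

```csharp
using System;
using System.Collections.Generic;

static class StreamingSketch
{
    // Single-pass mean and unbiased (n-1 normalized) variance over a stream.
    // Uses Welford's recurrence, so the data never needs to be buffered or sorted.
    public static Tuple<double, double> MeanVariance(IEnumerable<double> stream)
    {
        long n = 0;
        double mean = 0.0;
        double m2 = 0.0;           // running sum of squared deviations from the mean

        foreach (var x in stream)
        {
            n++;
            double delta = x - mean;
            mean += delta / n;
            m2 += delta * (x - mean);
        }

        if (n == 0) return Tuple.Create(double.NaN, double.NaN);
        double variance = n < 2 ? double.NaN : m2 / (n - 1);
        return Tuple.Create(mean, variance);
    }
}
```

The NaN behaviour matches the contract stated above: an empty stream yields NaN for the mean, and fewer than two entries yields NaN for the variance.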
+ + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Calculates the entropy of a stream of double values. + Returns NaN if any of the values in the stream are NaN. + + The input stream to evaluate. + + + + + Used to simplify parallel code, particularly between the .NET 4.0 and Silverlight Code. + + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The body to be invoked for each iteration range. + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The partition size for splitting work into smaller pieces. + The body to be invoked for each iteration range. + + + + Executes each of the provided actions inside a discrete, asynchronous task. + + An array of actions to execute. + The actions array contains a null element. + At least one invocation of the actions threw an exception. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. 
+ The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Double-precision trigonometry toolkit. + + + + + Constant to convert a degree to grad. + + + + + Converts a degree (360-periodic) angle to a grad (400-periodic) angle. + + The degree to convert. + The converted grad angle. + + + + Converts a degree (360-periodic) angle to a radian (2*Pi-periodic) angle. + + The degree to convert. + The converted radian angle. + + + + Converts a grad (400-periodic) angle to a degree (360-periodic) angle. + + The grad to convert. + The converted degree. + + + + Converts a grad (400-periodic) angle to a radian (2*Pi-periodic) angle. + + The grad to convert. + The converted radian. + + + + Converts a radian (2*Pi-periodic) angle to a degree (360-periodic) angle. + + The radian to convert. + The converted degree. + + + + Converts a radian (2*Pi-periodic) angle to a grad (400-periodic) angle. + + The radian to convert. + The converted grad. + + + + Normalized Sinc function. sinc(x) = sin(pi*x)/(pi*x). + + + + + Trigonometric Sine of an angle in radian, or opposite / hypotenuse. + + The angle in radian. + The sine of the radian angle. + + + + Trigonometric Sine of a Complex number. + + The complex value. + The sine of the complex number. + + + + Trigonometric Cosine of an angle in radian, or adjacent / hypotenuse. + + The angle in radian. + The cosine of an angle in radian. + + + + Trigonometric Cosine of a Complex number. + + The complex value. + The cosine of a complex number. + + + + Trigonometric Tangent of an angle in radian, or opposite / adjacent. + + The angle in radian. + The tangent of the radian angle. + + + + Trigonometric Tangent of a Complex number. + + The complex value. + The tangent of the complex number. + + + + Trigonometric Cotangent of an angle in radian, or adjacent / opposite. Reciprocal of the tangent. + + The angle in radian. + The cotangent of an angle in radian. + + + + Trigonometric Cotangent of a Complex number. + + The complex value. + The cotangent of the complex number. + + + + Trigonometric Secant of an angle in radian, or hypotenuse / adjacent. Reciprocal of the cosine. + + The angle in radian. + The secant of the radian angle. + + + + Trigonometric Secant of a Complex number. + + The complex value. + The secant of the complex number. + + + + Trigonometric Cosecant of an angle in radian, or hypotenuse / opposite. Reciprocal of the sine. + + The angle in radian. + Cosecant of an angle in radian. + + + + Trigonometric Cosecant of a Complex number. + + The complex value. + The cosecant of a complex number. + + + + Trigonometric principal Arc Sine in radian + + The opposite for a unit hypotenuse (i.e. opposite / hyptenuse). + The angle in radian. + + + + Trigonometric principal Arc Sine of this Complex number. + + The complex value. + The arc sine of a complex number. + + + + Trigonometric principal Arc Cosine in radian + + The adjacent for a unit hypotenuse (i.e. adjacent / hypotenuse). 
+ The angle in radian. + + + + Trigonometric principal Arc Cosine of this Complex number. + + The complex value. + The arc cosine of a complex number. + + + + Trigonometric principal Arc Tangent in radian + + The opposite for a unit adjacent (i.e. opposite / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Tangent of this Complex number. + + The complex value. + The arc tangent of a complex number. + + + + Trigonometric principal Arc Cotangent in radian + + The adjacent for a unit opposite (i.e. adjacent / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cotangent of this Complex number. + + The complex value. + The arc cotangent of a complex number. + + + + Trigonometric principal Arc Secant in radian + + The hypotenuse for a unit adjacent (i.e. hypotenuse / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Secant of this Complex number. + + The complex value. + The arc secant of a complex number. + + + + Trigonometric principal Arc Cosecant in radian + + The hypotenuse for a unit opposite (i.e. hypotenuse / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cosecant of this Complex number. + + The complex value. + The arc cosecant of a complex number. + + + + Hyperbolic Sine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic sine of the angle. + + + + Hyperbolic Sine of a Complex number. + + The complex value. + The hyperbolic sine of a complex number. + + + + Hyperbolic Cosine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic Cosine of the angle. + + + + Hyperbolic Cosine of a Complex number. + + The complex value. + The hyperbolic cosine of a complex number. + + + + Hyperbolic Tangent in radian + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic tangent of the angle. + + + + Hyperbolic Tangent of a Complex number. + + The complex value. + The hyperbolic tangent of a complex number. + + + + Hyperbolic Cotangent + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cotangent of the angle. + + + + Hyperbolic Cotangent of a Complex number. + + The complex value. + The hyperbolic cotangent of a complex number. + + + + Hyperbolic Secant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic secant of the angle. + + + + Hyperbolic Secant of a Complex number. + + The complex value. + The hyperbolic secant of a complex number. + + + + Hyperbolic Cosecant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cosecant of the angle. + + + + Hyperbolic Cosecant of a Complex number. + + The complex value. + The hyperbolic cosecant of a complex number. + + + + Hyperbolic Area Sine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Sine of this Complex number. + + The complex value. + The hyperbolic arc sine of a complex number. + + + + Hyperbolic Area Cosine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosine of this Complex number. + + The complex value. + The hyperbolic arc cosine of a complex number. + + + + Hyperbolic Area Tangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Tangent of this Complex number. + + The complex value. + The hyperbolic arc tangent of a complex number. + + + + Hyperbolic Area Cotangent + + The real value. + The hyperbolic angle, i.e. 
the area of its hyperbolic sector. + + + + Hyperbolic Area Cotangent of this Complex number. + + The complex value. + The hyperbolic arc cotangent of a complex number. + + + + Hyperbolic Area Secant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Secant of this Complex number. + + The complex value. + The hyperbolic arc secant of a complex number. + + + + Hyperbolic Area Cosecant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosecant of this Complex number. + + The complex value. + The hyperbolic arc cosecant of a complex number. + + + + Hamming window. Named after Richard Hamming. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hamming window. Named after Richard Hamming. + Periodic version, useful e.g. for FFT purposes. + + + + + Hann window. Named after Julius von Hann. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hann window. Named after Julius von Hann. + Periodic version, useful e.g. for FFT purposes. + + + + + Cosine window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Cosine window. + Periodic version, useful e.g. for FFT purposes. + + + + + Lanczos window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Lanczos window. + Periodic version, useful e.g. for FFT purposes. + + + + + Gauss window. + + + + + Blackman window. + + + + + Blackman-Harris window. + + + + + Blackman-Nuttall window. + + + + + Bartlett window. + + + + + Bartlett-Hann window. + + + + + Nuttall window. + + + + + Flat top window. + + + + + Uniform rectangular (dirichlet) window. + + + + + Triangular window. + + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized string similar to The accuracy couldn't be reached with the specified number of iterations.. + + + + + Looks up a localized string similar to The array arguments must have the same length.. + + + + + Looks up a localized string similar to The given array has the wrong length. Should be {0}.. + + + + + Looks up a localized string similar to The argument must be between 0 and 1.. + + + + + Looks up a localized string similar to Value cannot be in the range -1 < x < 1.. + + + + + Looks up a localized string similar to Value must be even.. + + + + + Looks up a localized string similar to The histogram does not contain the value.. + + + + + Looks up a localized string similar to Value is expected to be between {0} and {1} (including {0} and {1}).. + + + + + Looks up a localized string similar to At least one item of {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be greater than or equal to one.. + + + + + Looks up a localized string similar to Matrix dimensions must agree.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: {0}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}, op3 is {2}.. + + + + + Looks up a localized string similar to The requested matrix does not exist.. 
+ + + + + Looks up a localized string similar to The matrix indices must not be out of range of the given matrix.. + + + + + Looks up a localized string similar to Matrix must not be rank deficient.. + + + + + Looks up a localized string similar to Matrix must not be singular.. + + + + + Looks up a localized string similar to Matrix must be positive definite.. + + + + + Looks up a localized string similar to Matrix column dimensions must agree.. + + + + + Looks up a localized string similar to Matrix row dimensions must agree.. + + + + + Looks up a localized string similar to Matrix must have exactly one column.. + + + + + Looks up a localized string similar to Matrix must have exactly one column and row, thus have only one cell.. + + + + + Looks up a localized string similar to Matrix must have exactly one row.. + + + + + Looks up a localized string similar to Matrix must be square.. + + + + + Looks up a localized string similar to Matrix must be symmetric.. + + + + + Looks up a localized string similar to Matrix must be symmetric positive definite.. + + + + + Looks up a localized string similar to In the specified range, the exclusive maximum must be greater than the inclusive minimum.. + + + + + Looks up a localized string similar to In the specified range, the minimum is greater than maximum.. + + + + + Looks up a localized string similar to Value must be positive.. + + + + + Looks up a localized string similar to Value must neither be infinite nor NaN.. + + + + + Looks up a localized string similar to Value must not be negative (zero is ok).. + + + + + Looks up a localized string similar to {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be odd.. + + + + + Looks up a localized string similar to {0} must be greater than {1}.. + + + + + Looks up a localized string similar to {0} must be greater than or equal to {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than or equal to {1}.. + + + + + Looks up a localized string similar to The chosen parameter set is invalid (probably some value is out of range).. + + + + + Looks up a localized string similar to The given expression does not represent a complex number.. + + + + + Looks up a localized string similar to Value must be positive (and not zero).. + + + + + Looks up a localized string similar to Size must be a Power of Two.. + + + + + Looks up a localized string similar to Size must be a Power of Two in every dimension.. + + + + + Looks up a localized string similar to The range between {0} and {1} must be less than or equal to {2}.. + + + + + Looks up a localized string similar to Arguments must be different objects.. + + + + + Looks up a localized string similar to Array must have exactly one dimension (and not be null).. + + + + + Looks up a localized string similar to Value is too large.. + + + + + Looks up a localized string similar to Value is too large for the current iteration limit.. + + + + + Looks up a localized string similar to Type mismatch.. + + + + + Looks up a localized string similar to The upper bound must be strictly larger than the lower bound.. + + + + + Looks up a localized string similar to The upper bound must be at least as large as the lower bound.. + + + + + Looks up a localized string similar to Array length must be a multiple of {0}.. + + + + + Looks up a localized string similar to All vectors must have the same dimensionality.. 
+ + + + + Looks up a localized string similar to The vector must have 3 dimensions.. + + + + + Looks up a localized string similar to The given array is too small. It must be at least {0} long.. + + + + + Looks up a localized string similar to Big endian files are not supported.. + + + + + Looks up a localized string similar to The supplied collection is empty.. + + + + + Looks up a localized string similar to Complex matrices are not supported.. + + + + + Looks up a localized string similar to An algorithm failed to converge.. + + + + + Looks up a localized string similar to The sample size must be larger than the given degrees of freedom.. + + + + + Looks up a localized string similar to This feature is not implemented yet (but is planned).. + + + + + Looks up a localized string similar to The given file doesn't exist.. + + + + + Looks up a localized string similar to Sample points should be sorted in strictly ascending order. + + + + + Looks up a localized string similar to All sample points should be unique.. + + + + + Looks up a localized string similar to Invalid parameterization for the distribution.. + + + + + Looks up a localized string similar to Invalid Left Boundary Condition.. + + + + + Looks up a localized string similar to The operation could not be performed because the accumulator is empty.. + + + + + Looks up a localized string similar to The operation could not be performed because the histogram is empty.. + + + + + Looks up a localized string similar to Not enough points in the distribution.. + + + + + Looks up a localized string similar to No Samples Provided. Preparation Required.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method, parameter number : {0}. + + + + + Looks up a localized string similar to Invalid Right Boundary Condition.. + + + + + Looks up a localized string similar to Lag must be positive. + + + + + Looks up a localized string similar to Lag must be smaller than the sample size. + + + + + Looks up a localized string similar to ddd MMM dd HH:mm:ss yyyy. + + + + + Looks up a localized string similar to Matrices can not be empty and must have at least one row and column.. + + + + + Looks up a localized string similar to The number of columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Matrix must be in sparse storage format. + + + + + Looks up a localized string similar to The number of rows of a matrix must be positive.. + + + + + Looks up a localized string similar to The number of rows or columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Unable to allocate native memory.. + + + + + Looks up a localized string similar to Only 1 and 2 dimensional arrays are supported.. + + + + + Looks up a localized string similar to Data must contain at least {0} values.. + + + + + Looks up a localized string similar to Name cannot contain a space. name: {0}. + + + + + Looks up a localized string similar to {0} is not a supported type.. + + + + + Looks up a localized string similar to Algorithm experience a numerical break down + . + + + + + Looks up a localized string similar to The two arguments can't be compared (maybe they are part of a partial ordering?). + + + + + Looks up a localized string similar to The integer array does not represent a valid permutation.. 
+ + + + + Looks up a localized string similar to The sampler's proposal distribution is not upper bounding the target density.. + + + + + Looks up a localized string similar to A regression of the requested order requires at least {0} samples. Only {1} samples have been provided. . + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds.. + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds. Consider to use RobustNewtonRaphson instead.. + + + + + Looks up a localized string similar to The lower and upper bounds must bracket a single root.. + + + + + Looks up a localized string similar to The algorithm ended without root in the range.. + + + + + Looks up a localized string similar to The number of rows must greater than or equal to the number of columns.. + + + + + Looks up a localized string similar to All sample vectors must have the same length. However, vectors with disagreeing length {0} and {1} have been provided. A sample with index i is given by the value at index i of each provided vector.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed. The {0}-th diagonal element of the factor U is zero.. + + + + + Looks up a localized string similar to The singular vectors were not computed.. + + + + + Looks up a localized string similar to This special case is not supported yet (but is planned).. + + + + + Looks up a localized string similar to The given stop criterion already exist in the collection.. + + + + + Looks up a localized string similar to There is no stop criterion in the collection.. + + + + + Looks up a localized string similar to String parameter cannot be empty or null.. + + + + + Looks up a localized string similar to We only support sparse matrix with less than int.MaxValue elements.. + + + + + Looks up a localized string similar to The moment of the distribution is undefined.. + + + + + Looks up a localized string similar to A user defined provider has not been specified.. + + + + + Looks up a localized string similar to User work buffers are not supported by this provider.. + + + + + Looks up a localized string similar to Vectors can not be empty and must have at least one element.. + + + + + Looks up a localized string similar to The given work array is too small. Check work[0] for the corret size.. + + +
+
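The trigonometry and window-function members summarized in the file above include the normalized sinc, sinc(x) = sin(pi*x)/(pi*x), and symmetric/periodic variants of the classic windows. A rough, self-contained illustration of the symmetric Hamming case follows; the 0.54/0.46 coefficients are the textbook values, and the exact constants used by the library may differ:

```csharp
using System;

static class WindowSketch
{
    // Normalized sinc: sin(pi*x)/(pi*x), with sinc(0) = 1.
    public static double Sinc(double x)
    {
        double z = Math.PI * x;
        return Math.Abs(z) < 1e-15 ? 1.0 : Math.Sin(z) / z;
    }

    // Symmetric Hamming window of the given width, in the textbook form
    // 0.54 - 0.46*cos(2*pi*n/(N-1)), useful e.g. for filter design.
    public static double[] HammingSymmetric(int width)
    {
        var w = new double[width];
        for (int n = 0; n < width; n++)
        {
            w[n] = width == 1
                ? 1.0
                : 0.54 - 0.46 * Math.Cos(2.0 * Math.PI * n / (width - 1));
        }
        return w;
    }
}
```

As the summaries note, the periodic variants differ only in the denominator (N instead of N-1), which is what makes them the right choice for FFT use.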
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll new file mode 100644 index 0000000..d7d8148 Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll differ diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML new file mode 100644 index 0000000..59edef1 --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.XML @@ -0,0 +1,49130 @@ + + + + MathNet.Numerics + + + + + Useful extension methods for Arrays. + + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Copies the values from on array to another. + + The source array. + The destination array. + + + + Enumerative Combinatorics and Counting. + + + + + Count the number of possible variations without repetition. + The order matters and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of distinct variations. + + + + Count the number of possible variations with repetition. + The order matters and each object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of distinct variations with repetition. + + + + Count the number of possible combinations without repetition. + The order does not matter and each object can be chosen only once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + Maximum number of combinations. + + + + Count the number of possible combinations with repetition. + The order does not matter and an object can be chosen more than once. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen 0, 1 or multiple times. + Maximum number of combinations with repetition. + + + + Count the number of possible permutations (without repetition). + + Number of (distinguishable) elements in the set. + Maximum number of permutations without repetition. + + + + Generate a random permutation, without repetition, by generating the index numbers 0 to N-1 and shuffle them randomly. + Implemented using Fisher-Yates Shuffling. + + An array of length N that contains (in any order) the integers of the interval [0, N). + Number of (distinguishable) elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + + + + Select a random permutation, without repetition, from a data array by reordering the provided array in-place. + Implemented using Fisher-Yates Shuffling. The provided data array will be modified. + + The data array to be reordered. The array will be modified by this routine. + The random number generator to use. 
Optional; the default random source will be used if null. + + + + Select a random permutation from a data sequence by returning the provided data in random order. + Implemented using Fisher-Yates Shuffling. + + The data elements to be reordered. + The random number generator to use. Optional; the default random source will be used if null. + + + + Generate a random combination, without repetition, by randomly selecting some of N elements. + + Number of elements in the set. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Generate a random combination, without repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + Boolean mask array of length N, for each item true if it is selected. + + + + Select a random combination, without repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination, in the original order. + + + + Generates a random combination, with repetition, by randomly selecting k of N elements. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + Integer mask array of length N, for each item the number of times it was selected. + + + + Select a random combination, with repetition, from a data sequence by selecting k elements in original order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen combination with repetition, in the original order. + + + + Generate a random variation, without repetition, by randomly selecting k of n elements with order. + Implemented using partial Fisher-Yates Shuffling. + + Number of elements in the set. + Number of elements to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, without repetition, from a data sequence by randomly selecting k elements in random order. + Implemented using partial Fisher-Yates Shuffling. + + The data source to choose from. + Number of elements (k) to choose from the set. Each element is chosen at most once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation, in random order. + + + + Generate a random variation, with repetition, by randomly selecting k of n elements with order. + + Number of elements in the set. + Number of elements to choose from the set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. 
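The counting and random-selection helpers documented above lend themselves to a short usage sketch. This assumes the MathNet.Numerics 3.16.0 Combinatorics class exposes them under the names Variations, Combinations, Permutations and GeneratePermutation; the member names are not visible in the stripped XML, so treat them as assumptions.

using System;
using MathNet.Numerics;

class CombinatoricsDemo
{
    static void Main()
    {
        // Counting: choose 3 out of 10 elements.
        double variations = Combinatorics.Variations(10, 3);      // order matters, no repetition: 720
        double combinations = Combinatorics.Combinations(10, 3);  // order ignored, no repetition: 120
        double permutations = Combinatorics.Permutations(10);     // all orderings of 10 elements: 3628800

        Console.WriteLine($"{variations} {combinations} {permutations}");

        // Random permutation of the index numbers 0..4, via Fisher-Yates shuffling as described above.
        int[] order = Combinatorics.GeneratePermutation(5);
        Console.WriteLine(string.Join(",", order));
    }
}

The variation-generation documentation continues below.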
+ An array of length K that contains the indices of the selections as integers of the interval [0, N). + + + + Select a random variation, with repetition, from a data sequence by randomly selecting k elements in random order. + + The data source to choose from. + Number of elements (k) to choose from the data set. Elements can be chosen more than once. + The random number generator to use. Optional; the default random source will be used if null. + The chosen variation with repetition, in random order. + + + + 32-bit single precision complex numbers class. + + + + The class Complex32 provides all elementary operations + on complex numbers. All the operators +, -, + *, /, ==, != are defined in the + canonical way. Additional complex trigonometric functions + are also provided. Note that the Complex32 structures + has two special constant values and + . + + + + Complex32 x = new Complex32(1f,2f); + Complex32 y = Complex32.FromPolarCoordinates(1f, Math.Pi); + Complex32 z = (x + y) / (x - y); + + + + For mathematical details about complex numbers, please + have a look at the + Wikipedia + + + + + + The real component of the complex number. + + + + + The imaginary component of the complex number. + + + + + Initializes a new instance of the Complex32 structure with the given real + and imaginary parts. + + The value for the real component. + The value for the imaginary component. + + + + Creates a complex number from a point's polar coordinates. + + A complex number. + The magnitude, which is the distance from the origin (the intersection of the x-axis and the y-axis) to the number. + The phase, which is the angle from the line to the horizontal axis, measured in radians. + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to one and an imaginary number equal to zero. + + + + + Returns a new instance + with a real number equal to zero and an imaginary number equal to one. + + + + + Returns a new instance + with real and imaginary numbers positive infinite. + + + + + Returns a new instance + with real and imaginary numbers not a number. + + + + + Gets the real component of the complex number. + + The real component of the complex number. + + + + Gets the real imaginary component of the complex number. + + The real imaginary component of the complex number. + + + + Gets the phase or argument of this Complex32. + + + Phase always returns a value bigger than negative Pi and + smaller or equal to Pi. If this Complex32 is zero, the Complex32 + is assumed to be positive real with an argument of zero. + + The phase or argument of this Complex32 + + + + Gets the magnitude (or absolute value) of a complex number. + + Assuming that magnitude of (inf,a) and (a,inf) and (inf,inf) is inf and (NaN,a), (a,NaN) and (NaN,NaN) is NaN + The magnitude of the current instance. + + + + Gets the squared magnitude (or squared absolute value) of a complex number. + + The squared magnitude of the current instance. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex32. + + + + Gets a value indicating whether the Complex32 is zero. + + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. 
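Building on the class-level example above, here is a minimal sketch of Cartesian and polar construction together with the Magnitude and Phase properties, assuming the 3.16.0 API matches the descriptions in this file.

using System;
using MathNet.Numerics;

class Complex32Demo
{
    static void Main()
    {
        // Cartesian and polar construction (see the class-level example above).
        var x = new Complex32(1f, 2f);
        var y = Complex32.FromPolarCoordinates(1f, (float)Math.PI);

        var z = (x + y) / (x - y);        // operators are defined in the canonical way

        Console.WriteLine(x.Magnitude);   // sqrt(1 + 4), about 2.236
        Console.WriteLine(x.Phase);       // atan2(2, 1), about 1.107 rad
        Console.WriteLine(z);             // Cartesian string form via ToString()
    }
}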
+ + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + + true if this instance is ; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + + true if this instance is real nonnegative number; otherwise, false. + + + + + Exponential of this Complex32 (exp(x), E^x). + + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex32 (Base E). + + The natural logarithm of this complex number. + + + + Common Logarithm of this Complex32 (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex32 with custom base. + + The logarithm of this complex number. + + + + Raise this Complex32 to the given value. + + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex32 to the inverse of the given value. + + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex32 + + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex32 + + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex32. + + + + + Evaluate all cubic roots of this Complex32. + + + + + Equality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real and imaginary components of the two complex numbers are equal; false otherwise. + + + + Inequality test. + + One of complex numbers to compare. + The other complex numbers to compare. + true if the real or imaginary components of the two complex numbers are not equal; false otherwise. + + + + Unary addition. + + The complex number to operate on. + Returns the same complex number. + + + + Unary minus. + + The complex number to operate on. + The negated value of the . + + + Addition operator. Adds two complex numbers together. + The result of the addition. + One of the complex numbers to add. + The other complex numbers to add. + + + Subtraction operator. Subtracts two complex numbers. + The result of the subtraction. + The complex number to subtract from. + The complex number to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The complex numbers to add. + The float value to add. + + + Subtraction operator. Subtracts float value from a complex value. + The result of the subtraction. + The complex number to subtract from. + The float value to subtract. + + + Addition operator. Adds a complex number and float together. + The result of the addition. + The float value to add. + The complex numbers to add. + + + Subtraction operator. Subtracts complex value from a float value. + The result of the subtraction. + The float vale to subtract from. + The complex value to subtract. + + + Multiplication operator. Multiplies two complex numbers. + The result of the multiplication. + One of the complex numbers to multiply. + The other complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. 
+ The result of the multiplication. + The float value to multiply. + The complex number to multiply. + + + Multiplication operator. Multiplies a complex number with a float value. + The result of the multiplication. + The complex number to multiply. + The float value to multiply. + + + Division operator. Divides a complex number by another. + Enhanced Smith's algorithm for dividing two complex numbers + + The result of the division. + The dividend. + The divisor. + + + + Helper method for dividing. + + Re first + Im first + Re second + Im second + + + + + Division operator. Divides a float value by a complex number. + Algorithm based on Smith's algorithm + + The result of the division. + The dividend. + The divisor. + + + Division operator. Divides a complex number by a float value. + The result of the division. + The dividend. + The divisor. + + + + Computes the conjugate of a complex number and returns the result. + + + + + Returns the multiplicative inverse of a complex number. + + + + + Converts the value of the current complex number to its equivalent string representation in Cartesian form. + + The string representation of the current instance in Cartesian form. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format for its real and imaginary parts. + + The string representation of the current instance in Cartesian form. + A standard or custom numeric format string. + + is not a valid format string. + + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified culture-specific formatting information. + + The string representation of the current instance in Cartesian form, as specified by . + An object that supplies culture-specific formatting information. + + + Converts the value of the current complex number to its equivalent string representation + in Cartesian form by using the specified format and culture-specific format information for its real and imaginary parts. + The string representation of the current instance in Cartesian form, as specified by and . + A standard or custom numeric format string. + An object that supplies culture-specific formatting information. + + is not a valid format string. + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + The hash code for the complex number. + + + The hash code of the complex number. + + + The hash code is calculated as + System.Math.Exp(ComplexMath.Absolute(complexNumber)). + + + + + Checks if two complex numbers are equal. Two complex numbers are equal if their + corresponding real and imaginary components are equal. + + + Returns true if the two objects are the same object, or if their corresponding + real and imaginary components are equal, false otherwise. + + + The complex number to compare to with. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a float. + + + A complex number containing the value specified by the given string. + + + the string to parse. 
+ + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as float. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Explicit conversion of a real decimal to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Explicit conversion of a Complex to a Complex32. + + The decimal value to convert. + The result of the conversion. + + + + Implicit conversion of a real byte to a Complex32. + + The byte value to convert. + The result of the conversion. + + + + Implicit conversion of a real short to a Complex32. + + The short value to convert. + The result of the conversion. + + + + Implicit conversion of a signed byte to a Complex32. + + The signed byte value to convert. + The result of the conversion. + + + + Implicit conversion of a unsgined real short to a Complex32. + + The unsgined short value to convert. + The result of the conversion. + + + + Implicit conversion of a real int to a Complex32. + + The int value to convert. + The result of the conversion. + + + + Implicit conversion of a BigInteger int to a Complex32. + + The BigInteger value to convert. + The result of the conversion. + + + + Implicit conversion of a real long to a Complex32. + + The long value to convert. + The result of the conversion. + + + + Implicit conversion of a real uint to a Complex32. + + The uint value to convert. + The result of the conversion. + + + + Implicit conversion of a real ulong to a Complex32. + + The ulong value to convert. + The result of the conversion. + + + + Implicit conversion of a real float to a Complex32. + + The float value to convert. + The result of the conversion. + + + + Implicit conversion of a real double to a Complex32. + + The double value to convert. + The result of the conversion. + + + + Converts this Complex32 to a . + + A with the same values as this Complex32. + + + + Returns the additive inverse of a specified complex number. + + The result of the real and imaginary components of the value parameter multiplied by -1. + A complex number. + + + + Computes the conjugate of a complex number and returns the result. + + The conjugate of . + A complex number. + + + + Adds two complex numbers and returns the result. + + The sum of and . + The first complex number to add. + The second complex number to add. + + + + Subtracts one complex number from another and returns the result. 
+ + The result of subtracting from . + The value to subtract from (the minuend). + The value to subtract (the subtrahend). + + + + Returns the product of two complex numbers. + + The product of the and parameters. + The first complex number to multiply. + The second complex number to multiply. + + + + Divides one complex number by another and returns the result. + + The quotient of the division. + The complex number to be divided. + The complex number to divide by. + + + + Returns the multiplicative inverse of a complex number. + + The reciprocal of . + A complex number. + + + + Returns the square root of a specified complex number. + + The square root of . + A complex number. + + + + Gets the absolute value (or magnitude) of a complex number. + + The absolute value of . + A complex number. + + + + Returns e raised to the power specified by a complex number. + + The number e raised to the power . + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a complex number. + + The complex number raised to the power . + A complex number to be raised to a power. + A complex number that specifies a power. + + + + Returns a specified complex number raised to a power specified by a single-precision floating-point number. + + The complex number raised to the power . + A complex number to be raised to a power. + A single-precision floating-point number that specifies a power. + + + + Returns the natural (base e) logarithm of a specified complex number. + + The natural (base e) logarithm of . + A complex number. + + + + Returns the logarithm of a specified complex number in a specified base. + + The logarithm of in base . + A complex number. + The base of the logarithm. + + + + Returns the base-10 logarithm of a specified complex number. + + The base-10 logarithm of . + A complex number. + + + + Returns the sine of the specified complex number. + + The sine of . + A complex number. + + + + Returns the cosine of the specified complex number. + + The cosine of . + A complex number. + + + + Returns the tangent of the specified complex number. + + The tangent of . + A complex number. + + + + Returns the angle that is the arc sine of the specified complex number. + + The angle which is the arc sine of . + A complex number. + + + + Returns the angle that is the arc cosine of the specified complex number. + + The angle, measured in radians, which is the arc cosine of . + A complex number that represents a cosine. + + + + Returns the angle that is the arc tangent of the specified complex number. + + The angle that is the arc tangent of . + A complex number. + + + + Returns the hyperbolic sine of the specified complex number. + + The hyperbolic sine of . + A complex number. + + + + Returns the hyperbolic cosine of the specified complex number. + + The hyperbolic cosine of . + A complex number. + + + + Returns the hyperbolic tangent of the specified complex number. + + The hyperbolic tangent of . + A complex number. + + + + Extension methods for the Complex type provided by System.Numerics + + + + + Gets the squared magnitude of the Complex number. + + The number to perfom this operation on. + The squared magnitude of the Complex number. + + + + Gets the unity of this complex (same argument, but on the unit circle; exp(I*arg)) + + The unity of this Complex. + + + + Gets the conjugate of the Complex number. + + The number to perfom this operation on. 
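The static elementary functions described above (square root, exponential, logarithm, powers) mirror System.Numerics.Complex. A brief sketch follows; the exact static member names (Sqrt, Exp, Log, Pow) are assumed from that convention rather than shown in the stripped XML.

using System;
using MathNet.Numerics;

class Complex32MathDemo
{
    static void Main()
    {
        var i = new Complex32(0f, 1f);                              // the imaginary unit

        Console.WriteLine(Complex32.Sqrt(new Complex32(-4f, 0f)));  // principal root: 2i
        Console.WriteLine(Complex32.Exp(i));                        // cos(1) + i*sin(1)
        Console.WriteLine(Complex32.Log(i));                        // i*pi/2
        Console.WriteLine(Complex32.Pow(i, 2f));                    // -1 (float exponent overload)
    }
}

The extension-method documentation for System.Numerics.Complex continues below.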
+ + The semantic of setting the conjugate is such that + + // a, b of type Complex32 + a.Conjugate = b; + + is equivalent to + + // a, b of type Complex32 + a = b.Conjugate + + + The conjugate of the number. + + + + Returns the multiplicative inverse of a complex number. + + + + + Exponential of this Complex (exp(x), E^x). + + The number to perfom this operation on. + + The exponential of this complex number. + + + + + Natural Logarithm of this Complex (Base E). + + The number to perfom this operation on. + + The natural logarithm of this complex number. + + + + + Common Logarithm of this Complex (Base 10). + + The common logarithm of this complex number. + + + + Logarithm of this Complex with custom base. + + The logarithm of this complex number. + + + + Raise this Complex to the given value. + + The number to perfom this operation on. + + The exponent. + + + The complex number raised to the given exponent. + + + + + Raise this Complex to the inverse of the given value. + + The number to perfom this operation on. + + The root exponent. + + + The complex raised to the inverse of the given exponent. + + + + + The Square (power 2) of this Complex + + The number to perfom this operation on. + + The square of this complex number. + + + + + The Square Root (power 1/2) of this Complex + + The number to perfom this operation on. + + The square root of this complex number. + + + + + Evaluate all square roots of this Complex. + + + + + Evaluate all cubic roots of this Complex. + + + + + Gets a value indicating whether the Complex32 is zero. + + The number to perfom this operation on. + true if this instance is zero; otherwise, false. + + + + Gets a value indicating whether the Complex32 is one. + + The number to perfom this operation on. + true if this instance is one; otherwise, false. + + + + Gets a value indicating whether the Complex32 is the imaginary unit. + + true if this instance is ImaginaryOne; otherwise, false. + The number to perfom this operation on. + + + + Gets a value indicating whether the provided Complex32evaluates + to a value that is not a number. + + The number to perfom this operation on. + + true if this instance is NaN; otherwise, + false. + + + + + Gets a value indicating whether the provided Complex32 evaluates to an + infinite value. + + The number to perfom this operation on. + + true if this instance is infinite; otherwise, false. + + + True if it either evaluates to a complex infinity + or to a directed infinity. + + + + + Gets a value indicating whether the provided Complex32 is real. + + The number to perfom this operation on. + true if this instance is a real number; otherwise, false. + + + + Gets a value indicating whether the provided Complex32 is real and not negative, that is >= 0. + + The number to perfom this operation on. + + true if this instance is real nonnegative number; otherwise, false. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of a value of this type, which is appropriate for measuring how + close this value is to zero. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Returns a Norm of the difference of two values of this type, which is + appropriate for measuring how close together these two values are. + + + + + Creates a complex number based on a string. 
The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + The string to parse. + + + + + Creates a complex number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Parse a part (real or complex) from a complex number. + + Start Token. + Is set to true if the part identified itself as being imaginary. + + An that supplies culture-specific + formatting information. + + Resulting part as double. + + + + + Converts the string representation of a complex number to a double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to double-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + + + Creates a Complex32 number based on a string. The string can be in the + following formats (without the quotes): 'n', 'ni', 'n +/- ni', + 'ni +/- n', 'n,n', 'n,ni,' '(n,n)', or '(n,ni)', where n is a double. + + + A complex number containing the value specified by the given string. + + + the string to parse. + + + An that supplies culture-specific + formatting information. + + + + + Converts the string representation of a complex number to a single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain complex32.Zero. This parameter is passed uninitialized. + + + + + Converts the string representation of a complex number to single-precision complex number equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex number to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will contain Complex.Zero. This parameter is passed uninitialized. 
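A short sketch of the string parsing described above, using the Complex32 variants. The accepted formats are the ones listed in this file, and a failed TryParse leaves the result at Complex32.Zero.

using System;
using MathNet.Numerics;

class ParseDemo
{
    static void Main()
    {
        // 'n +/- ni' is one of the accepted formats listed above.
        Complex32 a = Complex32.Parse("3 + 4i");
        Console.WriteLine(a.Real);       // 3
        Console.WriteLine(a.Imaginary);  // 4

        // TryParse reports failure instead of throwing.
        if (!Complex32.TryParse("not a complex number", out Complex32 b))
        {
            Console.WriteLine(b == Complex32.Zero);  // True
        }
    }
}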
+ + + + + A collection of frequently used mathematical constants. + + + + The number e + + + The number log[2](e) + + + The number log[10](e) + + + The number log[e](2) + + + The number log[e](10) + + + The number log[e](pi) + + + The number log[e](2*pi)/2 + + + The number 1/e + + + The number sqrt(e) + + + The number sqrt(2) + + + The number sqrt(3) + + + The number sqrt(1/2) = 1/sqrt(2) = sqrt(2)/2 + + + The number sqrt(3)/2 + + + The number pi + + + The number pi*2 + + + The number pi/2 + + + The number pi*3/2 + + + The number pi/4 + + + The number sqrt(pi) + + + The number sqrt(2pi) + + + The number sqrt(2*pi*e) + + + The number log(sqrt(2*pi)) + + + The number log(sqrt(2*pi*e)) + + + The number log(2 * sqrt(e / pi)) + + + The number 1/pi + + + The number 2/pi + + + The number 1/sqrt(pi) + + + The number 1/sqrt(2pi) + + + The number 2/sqrt(pi) + + + The number 2 * sqrt(e / pi) + + + The number (pi)/180 - factor to convert from Degree (deg) to Radians (rad). + + + + + The number (pi)/200 - factor to convert from NewGrad (grad) to Radians (rad). + + + + + The number ln(10)/20 - factor to convert from Power Decibel (dB) to Neper (Np). Use this version when the Decibel represent a power gain but the compared values are not powers (e.g. amplitude, current, voltage). + + + The number ln(10)/10 - factor to convert from Neutral Decibel (dB) to Neper (Np). Use this version when either both or neither of the Decibel and the compared values represent powers. + + + The Catalan constant + Sum(k=0 -> inf){ (-1)^k/(2*k + 1)2 } + + + The Euler-Mascheroni constant + lim(n -> inf){ Sum(k=1 -> n) { 1/k - log(n) } } + + + The number (1+sqrt(5))/2, also known as the golden ratio + + + The Glaisher constant + e^(1/12 - Zeta(-1)) + + + The Khinchin constant + prod(k=1 -> inf){1+1/(k*(k+2))^log(k,2)} + + + + The size of a double in bytes. + + + + + The size of an int in bytes. + + + + + The size of a float in bytes. + + + + + The size of a Complex in bytes. + + + + + The size of a Complex in bytes. 
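A few of the constants above in use. The field names (Pi, Sqrt2, GoldenRatio, EulerMascheroni, Degree) are not visible in the stripped XML and are assumed from the released MathNet.Numerics API.

using System;
using MathNet.Numerics;

class ConstantsDemo
{
    static void Main()
    {
        Console.WriteLine(Constants.Pi);              // 3.14159...
        Console.WriteLine(Constants.Sqrt2);           // 1.41421...
        Console.WriteLine(Constants.GoldenRatio);     // (1 + sqrt(5)) / 2
        Console.WriteLine(Constants.EulerMascheroni); // 0.57721...

        // Degree-to-radian conversion via the pi/180 factor documented above.
        double radians = 90 * Constants.Degree;       // pi/2
        Console.WriteLine(radians);
    }
}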
+ + + + Speed of Light in Vacuum: c_0 = 2.99792458e8 [m s^-1] (defined, exact; 2007 CODATA) + + + Magnetic Permeability in Vacuum: mu_0 = 4*Pi * 10^-7 [N A^-2 = kg m A^-2 s^-2] (defined, exact; 2007 CODATA) + + + Electric Permittivity in Vacuum: epsilon_0 = 1/(mu_0*c_0^2) [F m^-1 = A^2 s^4 kg^-1 m^-3] (defined, exact; 2007 CODATA) + + + Characteristic Impedance of Vacuum: Z_0 = mu_0*c_0 [Ohm = m^2 kg s^-3 A^-2] (defined, exact; 2007 CODATA) + + + Newtonian Constant of Gravitation: G = 6.67429e-11 [m^3 kg^-1 s^-2] (2007 CODATA) + + + Planck's constant: h = 6.62606896e-34 [J s = m^2 kg s^-1] (2007 CODATA) + + + Reduced Planck's constant: h_bar = h / (2*Pi) [J s = m^2 kg s^-1] (2007 CODATA) + + + Planck mass: m_p = (h_bar*c_0/G)^(1/2) [kg] (2007 CODATA) + + + Planck temperature: T_p = (h_bar*c_0^5/G)^(1/2)/k [K] (2007 CODATA) + + + Planck length: l_p = h_bar/(m_p*c_0) [m] (2007 CODATA) + + + Planck time: t_p = l_p/c_0 [s] (2007 CODATA) + + + Elementary Electron Charge: e = 1.602176487e-19 [C = A s] (2007 CODATA) + + + Magnetic Flux Quantum: theta_0 = h/(2*e) [Wb = m^2 kg s^-2 A^-1] (2007 CODATA) + + + Conductance Quantum: G_0 = 2*e^2/h [S = m^-2 kg^-1 s^3 A^2] (2007 CODATA) + + + Josephson Constant: K_J = 2*e/h [Hz V^-1] (2007 CODATA) + + + Von Klitzing Constant: R_K = h/e^2 [Ohm = m^2 kg s^-3 A^-2] (2007 CODATA) + + + Bohr Magneton: mu_B = e*h_bar/2*m_e [J T^-1] (2007 CODATA) + + + Nuclear Magneton: mu_N = e*h_bar/2*m_p [J T^-1] (2007 CODATA) + + + Fine Structure Constant: alpha = e^2/4*Pi*e_0*h_bar*c_0 [1] (2007 CODATA) + + + Rydberg Constant: R_infty = alpha^2*m_e*c_0/2*h [m^-1] (2007 CODATA) + + + Bor Radius: a_0 = alpha/4*Pi*R_infty [m] (2007 CODATA) + + + Hartree Energy: E_h = 2*R_infty*h*c_0 [J] (2007 CODATA) + + + Quantum of Circulation: h/2*m_e [m^2 s^-1] (2007 CODATA) + + + Fermi Coupling Constant: G_F/(h_bar*c_0)^3 [GeV^-2] (2007 CODATA) + + + Weak Mixin Angle: sin^2(theta_W) [1] (2007 CODATA) + + + Electron Mass: [kg] (2007 CODATA) + + + Electron Mass Energy Equivalent: [J] (2007 CODATA) + + + Electron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Electron Compton Wavelength: [m] (2007 CODATA) + + + Classical Electron Radius: [m] (2007 CODATA) + + + Tomson Cross Section: [m^2] (2002 CODATA) + + + Electron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Electon G-Factor: [1] (2007 CODATA) + + + Muon Mass: [kg] (2007 CODATA) + + + Muon Mass Energy Equivalent: [J] (2007 CODATA) + + + Muon Molar Mass: [kg mol^-1] (2007 CODATA) + + + Muon Compton Wavelength: [m] (2007 CODATA) + + + Muon Magnetic Moment: [J T^-1] (2007 CODATA) + + + Muon G-Factor: [1] (2007 CODATA) + + + Tau Mass: [kg] (2007 CODATA) + + + Tau Mass Energy Equivalent: [J] (2007 CODATA) + + + Tau Molar Mass: [kg mol^-1] (2007 CODATA) + + + Tau Compton Wavelength: [m] (2007 CODATA) + + + Proton Mass: [kg] (2007 CODATA) + + + Proton Mass Energy Equivalent: [J] (2007 CODATA) + + + Proton Molar Mass: [kg mol^-1] (2007 CODATA) + + + Proton Compton Wavelength: [m] (2007 CODATA) + + + Proton Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton G-Factor: [1] (2007 CODATA) + + + Proton Shielded Magnetic Moment: [J T^-1] (2007 CODATA) + + + Proton Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Proton Shielded Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Neutron Mass: [kg] (2007 CODATA) + + + Neutron Mass Energy Equivalent: [J] (2007 CODATA) + + + Neutron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Neuron Compton Wavelength: [m] (2007 CODATA) + + + Neutron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Neutron G-Factor: [1] 
(2007 CODATA) + + + Neutron Gyro-Magnetic Ratio: [s^-1 T^-1] (2007 CODATA) + + + Deuteron Mass: [kg] (2007 CODATA) + + + Deuteron Mass Energy Equivalent: [J] (2007 CODATA) + + + Deuteron Molar Mass: [kg mol^-1] (2007 CODATA) + + + Deuteron Magnetic Moment: [J T^-1] (2007 CODATA) + + + Helion Mass: [kg] (2007 CODATA) + + + Helion Mass Energy Equivalent: [J] (2007 CODATA) + + + Helion Molar Mass: [kg mol^-1] (2007 CODATA) + + + Avogadro constant: [mol^-1] (2010 CODATA) + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 000 + + + The SI prefix factor corresponding to 1 000 000 + + + The SI prefix factor corresponding to 1 000 + + + The SI prefix factor corresponding to 100 + + + The SI prefix factor corresponding to 10 + + + The SI prefix factor corresponding to 0.1 + + + The SI prefix factor corresponding to 0.01 + + + The SI prefix factor corresponding to 0.001 + + + The SI prefix factor corresponding to 0.000 001 + + + The SI prefix factor corresponding to 0.000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 001 + + + The SI prefix factor corresponding to 0.000 000 000 000 000 000 000 001 + + + + Sets parameters for the library. + + + + + Use a specific provider if configured, e.g. using + environment variables, or fall back to the best providers. + + + + + Use the best provider available. + + + + + Gets or sets a value indicating whether the distribution classes check validate each parameter. + For the multivariate distributions this could involve an expensive matrix factorization. + The default setting of this property is true. + + + + + Gets or sets a value indicating whether to use thread safe random number generators (RNG). + Thread safe RNG about two and half time slower than non-thread safe RNG. + + + true to use thread safe random number generators ; otherwise, false. + + + + + Optional path to try to load native provider binaries from. + + + + + Gets or sets the linear algebra provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets the fourier transform provider. Consider to use UseNativeMKL or UseManaged instead. + + The linear algebra provider. + + + + Gets or sets a value indicating how many parallel worker threads shall be used + when parallelization is applicable. + + Default to the number of processor cores, must be between 1 and 1024 (inclusive). + + + + Gets or sets the TaskScheduler used to schedule the worker tasks. + + + + + Gets or sets the the block size to use for + the native linear algebra provider. + + The block size. Default 512, must be at least 32. + + + + Gets or sets the order of the matrix when linear algebra provider + must calculate multiply in parallel threads. + + The order. Default 64, must be at least 3. + + + + Gets or sets the number of elements a vector or matrix + must contain before we multiply threads. + + Number of elements. Default 300, must be at least 3. + + + + Numerical Derivative. 
+ + + + + Initialized a NumericalDerivative with the given points and center. + + + + + Initialized a NumericalDerivative with the default points and center for the given order. + + + + + Evaluates the derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + Derivative order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Univariate function handle. + Derivative order. + + + + Evaluates the first derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the first derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the second derivative of a scalar univariate function. + + Univariate function handle. + Point at which to evaluate the derivative. + + + + Creates a function handle for the second derivative of a scalar univariate function. + + Univariate function handle. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a multivariate function. + + Multivariate function handle. + Index of independent variable for partial derivative. + + + + Evaluates the partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + + + + Creates a function handle for the partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + Derivative order. + + + + Evaluates the first partial derivative of a bivariate function. + + Bivariate function handle. + First argument at which to evaluate the derivative. + Second argument at which to evaluate the derivative. + Index of independent variable for partial derivative. + + + + Creates a function handle for the first partial derivative of a bivariate function. + + Bivariate function handle. + Index of independent variable for partial derivative. + + + + Class to calculate finite difference coefficients using Taylor series expansion method. + + + For n points, coefficients are calculated up to the maximum derivative order possible (n-1). + The current function value position specifies the "center" for surrounding coefficients. + Selecting the first, middle or last positions represent forward, backwards and central difference methods. + + + + + + + Number of points for finite difference coefficients. Changing this value recalculates the coefficients table. + + + + + Initializes a new instance of the class. + + Number of finite difference coefficients. + + + + Gets the finite difference coefficients for a specified center and order. + + Current function position with respect to coefficients. 
Must be within point range. + Order of finite difference coefficients. + Vector of finite difference coefficients. + + + + Gets the finite difference coefficients for all orders at a specified center. + + Current function position with respect to coefficients. Must be within point range. + Rectangular array of coefficients, with columns specifing order. + + + + Type of finite different step size. + + + + + The absolute step size value will be used in numerical derivatives, regardless of order or function parameters. + + + + + A base step size value, h, will be scaled according to the function input parameter. A common example is hx = h*(1+abs(x)), however + this may vary depending on implementation. This definition only guarantees that the only scaling will be relative to the + function input parameter and not the order of the finite difference derivative. + + + + + A base step size value, eps (typically machine precision), is scaled according to the finite difference coefficient order + and function input parameter. The initial scaling according to finite different coefficient order can be thought of as producing a + base step size, h, that is equivalent to scaling. This stepsize is then scaled according to the function + input parameter. Although implementation may vary, an example of second order accurate scaling may be (eps)^(1/3)*(1+abs(x)). + + + + + Class to evaluate the numerical derivative of a function using finite difference approximations. + Variable point and center methods can be initialized . + This class can also be used to return function handles (delegates) for a fixed derivative order and variable. + It is possible to evaluate the derivative and partial derivative of univariate and multivariate functions respectively. + + + + + Initializes a NumericalDerivative class with the default 3 point center difference method. + + + + + Initialized a NumericalDerivative class. + + Number of points for finite difference derivatives. + Location of the center with respect to other points. Value ranges from zero to points-1. + + + + Sets and gets the finite difference step size. This value is for each function evaluation if relative stepsize types are used. + If the base step size used in scaling is desired, see . + + + Setting then getting the StepSize may return a different value. This is not unusual since a user-defined step size is converted to a + base-2 representable number to improve finite difference accuracy. + + + + + Sets and gets the base fininte difference step size. This assigned value to this parameter is only used if is set to RelativeX. + However, if the StepType is Relative, it will contain the base step size computed from based on the finite difference order. + + + + + Sets and gets the base finite difference step size. This parameter is only used if is set to Relative. + By default this is set to machine epsilon, from which is computed. + + + + + Sets and gets the location of the center point for the finite difference derivative. + + + + + Number of times a function is evaluated for numerical derivatives. + + + + + Type of step size for computing finite differences. If set to absolute, dx = h. + If set to relative, dx = (1+abs(x))*h^(2/(order+1)). This provides accurate results when + h is approximately equal to the square-root of machine accuracy, epsilon. + + + + + Evaluates the derivative of equidistant points using the finite difference method. + + Vector of points StepSize apart. + Derivative order. + Finite difference step size. 
+ Derivative of points of the specified order. + + + + Evaluates the derivative of a scalar univariate function. + + + Supplying the optional argument currentValue will reduce the number of function evaluations + required to calculate the finite difference derivative. + + Function handle. + Point at which to compute the derivative. + Derivative order. + Current function value at center. + Function derivative at x of the specified order. + + + + Creates a function handle for the derivative of a scalar univariate function. + + Input function handle. + Derivative order. + Function handle that evaluates the derivative of input function at a fixed order. + + + + Evaluates the partial derivative of a multivariate function. + + Multivariate function handle. + Vector at which to evaluate the derivative. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Function partial derivative at x of the specified order. + + + + Evaluates the partial derivatives of a multivariate function array. + + + This function assumes the input vector x is of the correct length for f. + + Multivariate vector function array handle. + Vector at which to evaluate the derivatives. + Index of independent variable for partial derivative. + Derivative order. + Current function value at center. + Vector of functions partial derivatives at x of the specified order. + + + + Creates a function handle for the partial derivative of a multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at a fixed order. + + + + Creates a function handle for the partial derivative of a vector multivariate function. + + Input function handle. + Index of the independent variable for partial derivative. + Derivative order. + Function handle that evaluates partial derivative of input function at fixed order. + + + + Evaluates the mixed partial derivative of variable order for multivariate functions. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function handle. + Points at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivative at x of the specified order. + + + + Evaluates the mixed partial derivative of variable order for multivariate function arrays. + + + This function recursively uses to evaluate mixed partial derivative. + Therefore, it is more efficient to call for higher order derivatives of + a single independent variable. + + Multivariate function array handle. + Vector at which to evaluate the derivative. + Vector of indices for the independent variables at descending derivative orders. + Highest order of differentiation. + Current function value at center. + Function mixed partial derivatives at x of the specified order. + + + + Creates a function handle for the mixed partial derivative of a multivariate function. + + Input function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. 
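The NumericalDerivative members documented above support a compact sketch: a default three-point central scheme, a univariate derivative, and a partial derivative. The signatures are assumed to match the parameter lists in this file.

using System;
using MathNet.Numerics.Differentiation;

class DerivativeDemo
{
    static void Main()
    {
        // Default: 3-point central difference, as documented above.
        var nd = new NumericalDerivative();

        Func<double, double> f = x => Math.Sin(x);
        double d1 = nd.EvaluateDerivative(f, Math.PI / 4, 1);  // about cos(pi/4) = 0.7071
        double d2 = nd.EvaluateDerivative(f, Math.PI / 4, 2);  // about -sin(pi/4) = -0.7071

        // Partial derivative of a multivariate function: d(x^2 * y)/dy at (1, 2) = 1.
        Func<double[], double> g = v => v[0] * v[0] * v[1];
        double dGdY = nd.EvaluatePartialDerivative(g, new[] { 1.0, 2.0 }, 1, 1);

        Console.WriteLine($"{d1} {d2} {dGdY}");
    }
}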
+ + + + Creates a function handle for the mixed partial derivative of a multivariate vector function. + + Input vector function handle. + Vector of indices for the independent variables at descending derivative orders. + Highest derivative order. + Function handle that evaluates the fixed mixed partial derivative of input function at fixed order. + + + + Resets the evaluation counter. + + + + + Class for evaluating the Hessian of a smooth continuously differentiable function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Hessian object with a three point central difference method. + + + + + Creates a numerical Hessian with a specified differentiation scheme. + + Number of points for Hessian evaluation. + Center point for differentiation. + + + + Evaluates the Hessian of the scalar univariate function f at points x. + + Scalar univariate function handle. + Point at which to evaluate Hessian. + Hessian tensor. + + + + Evaluates the Hessian of a multivariate function f at points x. + + + This method of computing the Hessian is only vaid for Lipschitz continuous functions. + The function mirrors the Hessian along the diagonal since d2f/dxdy = d2f/dydx for continuously differentiable functions. + + Multivariate function handle.> + Points at which to evaluate Hessian.> + Hessian tensor. + + + + Resets the function evaluation counter for the Hessian. + + + + + Class for evaluating the Jacobian of a function using finite differences. + By default, a central 3-point method is used. + + + + + Number of function evaluations. + + + + + Creates a numerical Jacobian object with a three point central difference method. + + + + + Creates a numerical Jacobian with a specified differentiation scheme. + + Number of points for Jacobian evaluation. + Center point for differentiation. + + + + Evaluates the Jacobian of scalar univariate function f at point x. + + Scalar univariate function handle. + Point at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x. + + + This function assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function f at vector x given a current function value. + + + To minimize the number of function evaluations, a user can supply the current value of the function + to be used in computing the Jacobian. This value must correspond to the "center" location for the + finite differencing. If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function handle. + Points at which to evaluate Jacobian. + Current function value at finite difference center. + Jacobian vector. + + + + Evaluates the Jacobian of a multivariate function array f at vector x. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Jacobian matrix. + + + + Evaluates the Jacobian of a multivariate function array f at vector x given a vector of current function values. + + + To minimize the number of function evaluations, a user can supply a vector of current values of the functions + to be used in computing the Jacobian. These value must correspond to the "center" location for the + finite differencing. 
If a scheme is used where the center value is not evaluated, this will provide no + added efficiency. This method also assumes that the length of vector x consistent with the argument count of f. + + Multivariate function array handle. + Vector at which to evaluate Jacobian. + Vector of current function values. + Jacobian matrix. + + + + Resets the function evaluation counter for the Jacobian. + + + + + Metrics to measure the distance between two structures. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Sum of Absolute Difference (SAD), i.e. the L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Mean-Absolute Error (MAE), i.e. the normalized L1-norm (Manhattan) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Sum of Squared Difference (SSD), i.e. the squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Mean-Squared Error (MSE), i.e. the normalized squared L2-norm (Euclidean) of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Euclidean Distance, i.e. the L2-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Manhattan Distance, i.e. the L1-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Chebyshev Distance, i.e. the Infinity-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Minkowski Distance, i.e. the generalized p-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Canberra Distance, a weighted version of the L1-norm of the difference. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Cosine Distance, representing the angular distance while ignoring the scale. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Hamming Distance, i.e. the number of positions that have different values in the vectors. + + + + + Pearson's distance, i.e. 1 - the person correlation coefficient. + + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Jaccard distance, i.e. 1 - the Jaccard index. + + Thrown if a or b are null. + Throw if a and b are of different lengths. + Jaccard distance. + + + + Discrete Univariate Bernoulli distribution. + The Bernoulli distribution is a distribution over bits. 
The parameter + p specifies the probability that a 1 is generated. + Wikipedia - Bernoulli distribution. + + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + If the Bernoulli parameter is not in the range [0,1]. + + + + Initializes a new instance of the Bernoulli class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + If the Bernoulli parameter is not in the range [0,1]. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Generates one sample from the Bernoulli distribution. + + The random source to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A random sample from the Bernoulli distribution. + + + + Samples a Bernoulli distributed random variable. + + A sample from the Bernoulli distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The random number generator to use. 
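A minimal sketch of the Bernoulli members described here, assuming the instance members Probability, CumulativeDistribution and Sample and the static PMF follow the descriptions above. The static Sample documentation continues below.

using System;
using MathNet.Numerics.Distributions;

class BernoulliDemo
{
    static void Main()
    {
        var bern = new Bernoulli(0.3);                        // P(X = 1) = 0.3

        Console.WriteLine(bern.Mean);                         // 0.3
        Console.WriteLine(bern.Variance);                     // p * (1 - p) = 0.21
        Console.WriteLine(bern.Probability(1));               // PMF at k = 1: 0.3
        Console.WriteLine(bern.CumulativeDistribution(0.5));  // P(X <= 0.5) = 0.7
        Console.WriteLine(bern.Sample());                     // a single 0/1 draw

        Console.WriteLine(Bernoulli.PMF(0.3, 1));             // static counterpart: 0.3
    }
}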
+ The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Samples a Bernoulli distributed random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + A sample from the Bernoulli distribution. + + + + Samples a sequence of Bernoulli distributed random variables. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + a sequence of samples from the distribution. + + + + Continuous Univariate Beta distribution. + For details about this distribution, see + Wikipedia - Beta distribution. + + + There are a few special cases for the parameterization of the Beta distribution. When both + shape parameters are positive infinity, the Beta distribution degenerates to a point distribution + at 0.5. When one of the shape parameters is positive infinity, the distribution degenerates to a point + distribution at the positive infinity. When both shape parameters are 0.0, the Beta distribution + degenerates to a Bernoulli distribution with parameter 0.5. When one shape parameter is 0.0, the + distribution degenerates to a point distribution at the non-zero shape parameter. + + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Beta class. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + A string representation of the Beta distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + Gets the α shape parameter of the Beta distribution. Range: α ≥ 0. + + + + + Gets the β shape parameter of the Beta distribution. Range: β ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Beta distribution. + + + + + Gets the variance of the Beta distribution. + + + + + Gets the standard deviation of the Beta distribution. + + + + + Gets the entropy of the Beta distribution. + + + + + Gets the skewness of the Beta distribution. + + + + + Gets the mode of the Beta distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the Beta distribution. + + + + + Gets the minimum of the Beta distribution. + + + + + Gets the maximum of the Beta distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . 
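[Editor's example] To make the Bernoulli and Beta entries above concrete, the sketch below constructs both distributions and calls the members documented here (probability mass/density, CDF, sampling). The parameter values are arbitrary illustrations, not taken from the original text; the namespace MathNet.Numerics.Distributions is assumed.

using System;
using MathNet.Numerics.Distributions;

class BernoulliBetaExample
{
    static void Main()
    {
        // Bernoulli with p = 0.3: probability of generating a one.
        var bernoulli = new Bernoulli(0.3);
        Console.WriteLine(bernoulli.Probability(1));            // PMF at k = 1 -> 0.3
        Console.WriteLine(bernoulli.CumulativeDistribution(0)); // P(X <= 0) -> 0.7
        int bit = bernoulli.Sample();                           // random 0 or 1

        // Beta with shape parameters alpha = 2, beta = 5.
        var beta = new Beta(2.0, 5.0);
        Console.WriteLine(beta.Mean);                // alpha / (alpha + beta) ~ 0.2857
        Console.WriteLine(beta.Density(0.25));       // PDF at x = 0.25
        Console.WriteLine(Beta.CDF(2.0, 5.0, 0.25)); // static CDF form documented above
        double draw = beta.Sample();                 // sampled via two Gamma draws, as noted above
    }
}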
+ + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Beta distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Beta distribution. + + a sequence of samples from the distribution. + + + + Samples Beta distributed random variables by sampling two Gamma variables and normalizing. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a random number from the Beta distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. 
+ The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the Beta distribution. Range: α ≥ 0. + The β shape parameter of the Beta distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Initializes a new instance of the BetaScaled class. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + + + + Create a Beta PERT distribution, used in risk analysis and other domains where an expert forecast + is used to construct an underlying beta distribution. + + The minimum value. + The maximum value. + The most likely value (mode). + The random number generator which is used to draw random samples. + The Beta distribution derived from the PERT parameters. + + + + A string representation of the distribution. + + A string representation of the BetaScaled distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the α shape parameter of the BetaScaled distribution. Range: α > 0. + + + + + Gets the β shape parameter of the BetaScaled distribution. Range: β > 0. + + + + + Gets the location (μ) of the BetaScaled distribution. + + + + + Gets the scale (σ) of the BetaScaled distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the BetaScaled distribution. + + + + + Gets the variance of the BetaScaled distribution. + + + + + Gets the standard deviation of the BetaScaled distribution. + + + + + Gets the entropy of the BetaScaled distribution. + + + + + Gets the skewness of the BetaScaled distribution. + + + + + Gets the mode of the BetaScaled distribution; when there are multiple answers, this routine will return 0.5. + + + + + Gets the median of the BetaScaled distribution. + + + + + Gets the minimum of the BetaScaled distribution. + + + + + Gets the maximum of the BetaScaled distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. 
ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. 
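[Editor's example] The BetaScaled members above generalize Beta to an arbitrary location and scale, and the PERT factory described above builds a distribution from a minimum, maximum and most-likely estimate. A tentative sketch follows; the factory name BetaScaled.PERT and its argument order (min, max, most likely, random source) are assumptions inferred from the parameter descriptions above, not confirmed by this file.

using System;
using MathNet.Numerics.Distributions;

class BetaScaledExample
{
    static void Main()
    {
        // Beta with alpha = 2, beta = 5, shifted to location mu = 10 with scale sigma = 4.
        var scaled = new BetaScaled(2.0, 5.0, 10.0, 4.0);
        Console.WriteLine(scaled.Mean);
        Console.WriteLine(scaled.Density(11.0)); // PDF at x = 11

        // Expert forecast: minimum 5, maximum 20, most likely 8 (assumed PERT factory).
        var pert = BetaScaled.PERT(5.0, 20.0, 8.0, new Random());
        Console.WriteLine(pert.Mean);
        double forecast = pert.Sample();
    }
}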
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The α shape parameter of the BetaScaled distribution. Range: α > 0. + The β shape parameter of the BetaScaled distribution. Range: β > 0. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Binomial distribution. + For details about this distribution, see + Wikipedia - Binomial distribution. + + + The distribution is parameterized by a probability (between 0.0 and 1.0). + + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + If is not in the interval [0.0,1.0]. + If is negative. + + + + Initializes a new instance of the Binomial class. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The random number generator which is used to draw random samples. + If is not in the interval [0.0,1.0]. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + + + + Gets the success probability in each trial. Range: 0 ≤ p ≤ 1. + + + + + Gets the number of trials. Range: n ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets all modes of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . 
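[Editor's example] A short sketch for the Binomial members above, evaluating the PMF and CDF both through an instance and through the static PMF(p, n, k) / CDF(p, n, x) forms documented below; the numbers are illustrative only.

using System;
using MathNet.Numerics.Distributions;

class BinomialExample
{
    static void Main()
    {
        // n = 10 trials, success probability p = 0.5.
        var binomial = new Binomial(0.5, 10);
        Console.WriteLine(binomial.Probability(5));            // P(X = 5) ~ 0.2461
        Console.WriteLine(binomial.CumulativeDistribution(5)); // P(X <= 5) ~ 0.6230
        int successes = binomial.Sample();                     // successes in 10 trials

        // Equivalent static forms.
        Console.WriteLine(Binomial.PMF(0.5, 10, 5));
        Console.WriteLine(Binomial.CDF(0.5, 10, 5));
    }
}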
+ + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the Binomial distribution without doing parameter checking. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successful trials. + + + + Samples a Binomially distributed random variable. + + The number of successes in N trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Binomially distributed random variables. + + a sequence of successes in N trials. + + + + Samples a binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The random number generator to use. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Samples a binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + The number of successes in trials. + + + + Samples a sequence of binomially distributed random variable. + + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The success probability (p) in each trial. Range: 0 ≤ p ≤ 1. + The number of trials (n). Range: n ≥ 0. + a sequence of successes in trials. + + + + Discrete Univariate Categorical distribution. + For details about this distribution, see + Wikipedia - Categorical distribution. This + distribution is sometimes called the Discrete distribution. 
+ + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + Support: 0..k where k = length(probability mass array)-1 + + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + + + + Initializes a new instance of the Categorical class from a . The distribution + will not be automatically updated when the histogram changes. The categorical distribution will have + one value for each bucket and a probability for that value proportional to the bucket count. + + The histogram from which to create the categorical variable. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Checks whether the parameters of the distribution are valid. + + An array of nonnegative ratios: this array does not need to be normalized as this is often impossible using floating point arithmetic. + If any of the probabilities are negative returns false, or if the sum of parameters is 0.0; otherwise true + + + + Gets the probability mass vector (non-negative ratios) of the multinomial. + + Sometimes the normalized probability vector cannot be represented exactly in a floating point representation. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a . + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets he mode of the distribution. + + Throws a . + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
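[Editor's example] The Categorical entries above accept unnormalized ratios. The sketch below builds a distribution from raw counts and samples from it; beyond the members named above (ratios, Probability, CumulativeDistribution, Sample) the values are illustrative.

using System;
using MathNet.Numerics.Distributions;

class CategoricalExample
{
    static void Main()
    {
        // Unnormalized ratios; the distribution normalizes them internally.
        double[] ratios = { 3.0, 1.0, 6.0 };
        var categorical = new Categorical(ratios);

        Console.WriteLine(categorical.Probability(2));            // P(X = 2) = 6/10 = 0.6
        Console.WriteLine(categorical.CumulativeDistribution(1)); // P(X <= 1) = 0.4
        int index = categorical.Sample();                         // 0, 1 or 2

        // Static form with an explicit random source, as documented above.
        int draw = Categorical.Sample(new Random(), ratios);
    }
}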
+ + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. + + An array corresponding to a CDF for a categorical distribution. Not assumed to be normalized. + A real number between 0 and 1. + An integer between 0 and the size of the categorical (exclusive), that corresponds to the inverse CDF for the given probability. + + + + Computes the cumulative distribution function. This method performs no parameter checking. + If the probability mass was normalized, the resulting cumulative distribution is normalized as well (up to numerical errors). + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + An array representing the unnormalized cumulative distribution function. + + + + Returns one trials from the categorical distribution. + + The random number generator to use. + The (unnormalized) cumulative distribution of the probability distribution. + One sample from the categorical distribution implied by . + + + + Samples a Binomially distributed random variable. + + The number of successful trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Bernoulli distributed random variables. + + a sequence of successful trial counts. + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of nonnegative ratios. Not assumed to be normalized. 
+ random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of nonnegative ratios. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of nonnegative ratios. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + The random number generator to use. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Samples one categorical distributed random variable; also known as the Discrete distribution. + + An array of the cumulative distribution. Not assumed to be normalized. + One random integer between 0 and the size of the categorical (exclusive). + + + + Samples a categorically distributed random variable. + + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + An array of the cumulative distribution. Not assumed to be normalized. + random integers between 0 and the size of the categorical (exclusive). + + + + Continuous Univariate Cauchy distribution. + The Cauchy distribution is a symmetric continuous probability distribution. For details about this distribution, see + Wikipedia - Cauchy distribution. + + + + + Initializes a new instance of the class with the location parameter set to 0 and the scale parameter set to 1 + + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + + + + Initializes a new instance of the class. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. 
Range: γ > 0. + + + + Gets the location (x0) of the distribution. + + + + + Gets the scale (γ) of the distribution. Range: γ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
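[Editor's example] For the Cauchy members above, a brief sketch. The Cauchy distribution has no finite mean or variance, so the example sticks to the density, CDF and quantile members documented here; the parameter values are arbitrary.

using System;
using MathNet.Numerics.Distributions;

class CauchyExample
{
    static void Main()
    {
        // Location x0 = 0, scale gamma = 1: the standard Cauchy distribution.
        var cauchy = new Cauchy(0.0, 1.0);

        Console.WriteLine(cauchy.Density(0.0));                       // PDF peak: 1/pi ~ 0.3183
        Console.WriteLine(cauchy.CumulativeDistribution(1.0));        // P(X <= 1) = 0.75
        Console.WriteLine(cauchy.InverseCumulativeDistribution(0.5)); // median = location = 0
        double heavyTailedDraw = cauchy.Sample();
    }
}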
+ + The random number generator to use. + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (x0) of the distribution. + The scale (γ) of the distribution. Range: γ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi distribution. + This distribution is a continuous probability distribution. The distribution usually arises when a k-dimensional vector's orthogonal + components are independent and each follow a standard normal distribution. The length of the vector will + then have a chi distribution. + Wikipedia - Chi distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Chi distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Chi distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. 
+ The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Chi-Squared distribution. + This distribution is a sum of the squares of k independent standard normal random variables. + Wikipedia - ChiSquare distribution. + + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Initializes a new instance of the class. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + + + + Gets the degrees of freedom (k) of the Chi-Squared distribution. Range: k > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . 
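[Editor's example] The Chi and Chi-Squared entries above differ only in whether the vector length or its square is considered. The sketch below evaluates both for k = 3 degrees of freedom and uses the ChiSquared quantile (InvCDF) documented below, e.g. to obtain a 95% critical value; the inputs are illustrative.

using System;
using MathNet.Numerics.Distributions;

class ChiExample
{
    static void Main()
    {
        const double degreesOfFreedom = 3.0;

        var chi = new Chi(degreesOfFreedom);
        Console.WriteLine(chi.Density(1.0)); // PDF of the vector length at 1

        var chiSquared = new ChiSquared(degreesOfFreedom);
        Console.WriteLine(chiSquared.CumulativeDistribution(7.815));  // ~ 0.95
        Console.WriteLine(ChiSquared.InvCDF(degreesOfFreedom, 0.95)); // ~ 7.815 critical value
        double sumOfSquares = chiSquared.Sample();
    }
}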
+ + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ChiSquare distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ChiSquare distribution. + + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a random number from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The degrees of freedom (k) of the distribution. Range: k > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The degrees of freedom (k) of the distribution. Range: k > 0. + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + Generates a sample from the ChiSquare distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sample from the ChiSquare distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The degrees of freedom (k) of the distribution. Range: k > 0. + a sample from the distribution. + + + + Continuous Univariate Uniform distribution. + The continuous uniform distribution is a distribution over real numbers. For details about this distribution, see + Wikipedia - Continuous uniform distribution. 
+ + + + + Initializes a new instance of the ContinuousUniform class with lower bound 0 and upper bound 1. + + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + If the upper bound is smaller than the lower bound. + + + + Initializes a new instance of the ContinuousUniform class with given lower and upper bounds. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + If the upper bound is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + + + + Gets the lower bound of the distribution. + + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the cumulative distribution at location . 
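[Editor's example] A minimal sketch for the ContinuousUniform members above, drawing from a given interval through both the instance and the static forms; the interval [2, 5] is arbitrary.

using System;
using MathNet.Numerics.Distributions;

class ContinuousUniformExample
{
    static void Main()
    {
        // Uniform over [2, 5].
        var uniform = new ContinuousUniform(2.0, 5.0);
        Console.WriteLine(uniform.Mean);                        // (lower + upper) / 2 = 3.5
        Console.WriteLine(uniform.Density(3.0));                // 1 / (upper - lower) ~ 0.3333
        Console.WriteLine(uniform.CumulativeDistribution(3.0)); // (3 - 2) / (5 - 2) ~ 0.3333
        double x = uniform.Sample();

        // Static form with an explicit random source, as documented above.
        double y = ContinuousUniform.Sample(new Random(), 2.0, 5.0);
    }
}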
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + the inverse cumulative density at . + + + + + Generates a sample from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Generates a sample from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a uniformly distributed sample. + + + + Generates a sequence of samples from the ContinuousUniform distribution. + + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of uniformly distributed samples. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ upper. + Upper bound. Range: lower ≤ upper. + a sequence of samples from the distribution. + + + + Discrete Univariate Conway-Maxwell-Poisson distribution. + The Conway-Maxwell-Poisson distribution is a generalization of the Poisson, Geometric and Bernoulli + distributions. It is parameterized by two real numbers "lambda" and "nu". For + + nu = 0 the distribution reverts to a Geometric distribution + nu = 1 the distribution reverts to the Poisson distribution + nu -> infinity the distribution converges to a Bernoulli distribution + + This implementation will cache the value of the normalization constant. + Wikipedia - ConwayMaxwellPoisson distribution. + + + + + The mean of the distribution. + + + + + The variance of the distribution. + + + + + Caches the value of the normalization constant. + + + + + Since many properties of the distribution can only be computed approximately, the tolerance + level specifies how much error we accept. + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Gets the lambda (λ) parameter. Range: λ > 0. + + + + + Gets the rate of decay (ν) parameter. Range: ν ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. 
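[Editor's example] A tentative sketch for the Conway-Maxwell-Poisson entries above. The class name ConwayMaxwellPoisson and the (lambda, nu) constructor order are assumed from the parameter descriptions above; as noted there, the mean and normalization constant are computed approximately and cached.

using System;
using MathNet.Numerics.Distributions;

class ConwayMaxwellPoissonExample
{
    static void Main()
    {
        // lambda = 2, nu = 1: with nu = 1 the distribution reverts to a Poisson(2).
        var cmp = new ConwayMaxwellPoisson(2.0, 1.0);

        Console.WriteLine(cmp.Probability(0));            // P(X = 0) ~ e^-2 ~ 0.1353
        Console.WriteLine(cmp.CumulativeDistribution(3)); // P(X <= 3)
        Console.WriteLine(cmp.Mean);                      // approximated, then cached
        int count = cmp.Sample();
    }
}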
+ + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + the cumulative distribution at location . + + + + + Gets the normalization constant of the Conway-Maxwell-Poisson distribution. + + + + + Computes an approximate normalization constant for the CMP distribution. + + The lambda (λ) parameter for the CMP distribution. + The rate of decay (ν) parameter for the CMP distribution. + + an approximate normalization constant for the CMP distribution. + + + + + Returns one trials from the distribution. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + The z parameter. + + One sample from the distribution implied by , , and . + + + + + Samples a Conway-Maxwell-Poisson distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples a sequence of a Conway-Maxwell-Poisson distributed random variables. + + + a sequence of samples from a Conway-Maxwell-Poisson distribution. + + + + + Samples a random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Samples a random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. 
Range: ν ≥ 0. + + + + Samples a sequence of this random variable. + + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter. Range: λ > 0. + The rate of decay (ν) parameter. Range: ν ≥ 0. + + + + Multivariate Dirichlet distribution. For details about this distribution, see + Wikipedia - Dirichlet distribution. + + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + + + + Initializes a new instance of the Dirichlet class. The distribution will + be initialized with the default random number generator. + + An array with the Dirichlet parameters. + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + + + + Initializes a new instance of the class. + random number generator. + The value of each parameter of the Dirichlet distribution. + The dimension of the Dirichlet distribution. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + No parameter can be less than zero and at least one parameter should be larger than zero. + + The parameters of the Dirichlet distribution. + + + + Gets or sets the parameters of the Dirichlet distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the dimension of the Dirichlet distribution. + + + + + Gets the sum of the Dirichlet parameters. + + + + + Gets the mean of the Dirichlet distribution. + + + + + Gets the variance of the Dirichlet distribution. + + + + + Gets the entropy of the distribution. + + + + + Computes the density of the distribution. + + The locations at which to compute the density. + the density at . + The Dirichlet distribution requires that the sum of the components of x equals 1. + You can also leave out the last component, and it will be computed from the others. + + + + Computes the log density of the distribution. + + The locations at which to compute the density. + the density at . + + + + Samples a Dirichlet distributed random vector. + + A sample from this distribution. + + + + Samples a Dirichlet distributed random vector. + + The random number generator to use. + The Dirichlet distribution parameter. + a sample from the distribution. + + + + Discrete Univariate Uniform distribution. + The discrete uniform distribution is a distribution over integers. The distribution + is parameterized by a lower and upper bound (both inclusive). + Wikipedia - Discrete uniform distribution. + + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Initializes a new instance of the DiscreteUniform class. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. 
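[Editor's example] To illustrate the Dirichlet and DiscreteUniform entries above: a Dirichlet sample is a probability vector (non-negative, summing to 1), while DiscreteUniform covers integers in an inclusive range. The alpha values and bounds below are arbitrary.

using System;
using MathNet.Numerics.Distributions;

class DirichletDiscreteUniformExample
{
    static void Main()
    {
        // Dirichlet with parameters alpha = (2, 3, 5); samples lie on the simplex.
        var dirichlet = new Dirichlet(new[] { 2.0, 3.0, 5.0 });
        double[] weights = dirichlet.Sample(); // non-negative, sums to 1
        Console.WriteLine(string.Join(", ", weights));

        // Discrete uniform over the inclusive range 1..6 (a fair die).
        var die = new DiscreteUniform(1, 6);
        Console.WriteLine(die.Probability(3));            // 1/6 ~ 0.1667
        Console.WriteLine(die.CumulativeDistribution(3)); // P(X <= 3) = 0.5
        int roll = die.Sample();
    }
}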
+ + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + + + + Gets the inclusive lower bound of the probability distribution. + + + + + Gets the inclusive upper bound of the probability distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution; since every element in the domain has the same probability this method returns the middle one. + + + + + Gets the median of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + the cumulative distribution at location . + + + + + Generates one sample from the discrete uniform distribution. This method does not do any parameter checking. + + The random source to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A random sample from the discrete uniform distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of uniformly distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a uniformly distributed random variable. + + The random number generator to use. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + The random number generator to use. + Lower bound, inclusive. 
Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Samples a uniformly distributed random variable. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + A sample from the discrete uniform distribution. + + + + Samples a sequence of uniformly distributed random variables. + + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound, inclusive. Range: lower ≤ upper. + Upper bound, inclusive. Range: lower ≤ upper. + a sequence of samples from the discrete uniform distribution. + + + + Continuous Univariate Erlang distribution. + This distribution is is a continuous probability distribution with wide applicability primarily due to its + relation to the exponential and Gamma distributions. + Wikipedia - Erlang distribution. + + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Erlang distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The scale (μ) of the Erlang distribution. Range: μ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Erlang distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + Gets the shape (k) of the Erlang distribution. Range: k ≥ 0. + + + + + Gets the rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + + + + + Gets the scale of the Erlang distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. 
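The discrete uniform members above map onto a very small amount of calling code. A sketch, again assuming the MathNet.Numerics.Distributions namespace; the bounds 1..6 (a fair die) are purely illustrative.

using System;
using MathNet.Numerics.Distributions;

static class DiscreteUniformSketch
{
    static void Main()
    {
        var die = new DiscreteUniform(1, 6);                 // inclusive bounds
        Console.WriteLine(die.Probability(3));               // PMF: 1/6
        Console.WriteLine(die.CumulativeDistribution(3.0));  // CDF: P(X <= 3) = 0.5
        Console.WriteLine(die.Mean);                         // (1 + 6) / 2 = 3.5

        // Static helpers avoid allocating a distribution object.
        var rng = new Random(42);
        int roll = DiscreteUniform.Sample(rng, 1, 6);
        Console.WriteLine(roll);
    }
}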
+ + + + + Gets the minimum value. + + + + + Gets the Maximum value. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Generates a sample from the Erlang distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Erlang distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Erlang distribution. Range: k ≥ 0. + The rate or inverse scale (λ) of the Erlang distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Exponential distribution. 
+ The exponential distribution is a distribution over the real numbers parameterized by one non-negative parameter. + Wikipedia - exponential distribution. + + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Initializes a new instance of the class. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + Gets the rate (λ) parameter of the distribution. Range: λ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Exponential distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + the inverse cumulative density at . 
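The Erlang and exponential entries above share the same calling pattern. A hedged sketch with arbitrary rate values; Erlang's shape is passed as an integer literal, which compiles whether the parameter is declared as int or double.

using System;
using MathNet.Numerics.Distributions;

static class ExponentialErlangSketch
{
    static void Main()
    {
        var exp = new Exponential(0.5);                       // rate lambda = 0.5, mean = 2
        Console.WriteLine(exp.Density(1.0));                  // PDF at x = 1
        Console.WriteLine(exp.CumulativeDistribution(1.0));   // CDF: 1 - e^(-0.5)
        Console.WriteLine(exp.InverseCumulativeDistribution(0.5)); // median = ln(2) / 0.5

        var erlang = new Erlang(3, 2.0);                      // shape k = 3, rate lambda = 2
        Console.WriteLine(erlang.Mean);                       // k / lambda = 1.5
        double wait = erlang.Sample();                        // total of k exponential stages
    }
}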
+ + + + + Draws a random sample from the distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The random number generator to use. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Draws a random sample from the distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sequence of samples from the Exponential distribution. + + The rate (λ) parameter of the distribution. Range: λ ≥ 0. + a sequence of samples from the distribution. + + + + Continuous Univariate F-distribution, also known as Fisher-Snedecor distribution. + For details about this distribution, see + Wikipedia - FisherSnedecor distribution. + + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Initializes a new instance of the class. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + Gets the first degree of freedom (d1) of the distribution. Range: d1 > 0. + + + + + Gets the second degree of freedom (d2) of the distribution. Range: d2 > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
+ + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the FisherSnedecor distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the FisherSnedecor distribution. + + a sequence of samples from the distribution. + + + + Generates one sample from the FisherSnedecor distribution without parameter checking. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a FisherSnedecor distributed random number. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. 
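The F-distribution (FisherSnedecor) members documented above are typically used to turn an F statistic into a p-value. A sketch with made-up degrees of freedom; note the documentation's own warning that the inverse CDF is not an explicit implementation and may be slow.

using System;
using MathNet.Numerics.Distributions;

static class FisherSnedecorSketch
{
    static void Main()
    {
        var f = new FisherSnedecor(5.0, 10.0);                // d1 = 5, d2 = 10
        double fStat = 3.2;                                   // illustrative test statistic
        double pValue = 1.0 - f.CumulativeDistribution(fStat);
        Console.WriteLine(pValue);

        // Critical value at the 95% level (slow: no closed-form inverse, per the docs above).
        Console.WriteLine(f.InverseCumulativeDistribution(0.95));
    }
}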
+ The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The first degree of freedom (d1) of the distribution. Range: d1 > 0. + The second degree of freedom (d2) of the distribution. Range: d2 > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Gamma distribution. + For details about this distribution, see + Wikipedia - Gamma distribution. + + + The Gamma distribution is parametrized by a shape and inverse scale parameter. When we want + to specify a Gamma distribution which is a point distribution we set the shape parameter to be the + location of the point distribution and the inverse scale as positive infinity. The distribution + with shape and inverse scale both zero is undefined. + + Random number generation for the Gamma distribution is based on the algorithm in: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Initializes a new instance of the Gamma class. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a Gamma distribution from a shape and scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k) of the Gamma distribution. Range: k ≥ 0. + The scale (θ) of the Gamma distribution. Range: θ ≥ 0 + The random number generator which is used to draw random samples. Optional, can be null. + + + + Constructs a Gamma distribution from a shape and inverse scale parameter. The distribution will + be initialized with the default random number generator. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + Gets or sets the shape (k, α) of the Gamma distribution. Range: α ≥ 0. + + + + + Gets or sets the rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + + + + + Gets or sets the scale (θ) of the Gamma distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Gamma distribution. + + + + + Gets the variance of the Gamma distribution. + + + + + Gets the standard deviation of the Gamma distribution. + + + + + Gets the entropy of the Gamma distribution. + + + + + Gets the skewness of the Gamma distribution. 
+ + + + + Gets the mode of the Gamma distribution. + + + + + Gets the median of the Gamma distribution. + + + + + Gets the minimum of the Gamma distribution. + + + + + Gets the maximum of the Gamma distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Gamma distribution. + + a sequence of samples from the distribution. + + + + Sampling implementation based on: + "A Simple Method for Generating Gamma Variables" - Marsaglia & Tsang + ACM Transactions on Mathematical Software, Vol. 26, No. 3, September 2000, Pages 363–372. + This method performs no parameter checks. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + A sample from a Gamma distributed random variable. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + the inverse cumulative density at . + + + + + Generates a sample from the Gamma distribution. + + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. 
+ + The random number generator to use. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Gamma distribution. + + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k, α) of the Gamma distribution. Range: α ≥ 0. + The rate or inverse scale (β) of the Gamma distribution. Range: β ≥ 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Geometric distribution. + The Geometric distribution is a distribution over positive integers parameterized by one positive real number. + This implementation of the Geometric distribution will never generate 0's. + Wikipedia - geometric distribution. + + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the Geometric class. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + A that represents this instance. + + + + Tests whether the provided values are valid parameters for this distribution. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Gets the probability of generating a one. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + Throws a not supported exception. + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . 
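The Gamma entries above expose both a shape/rate constructor and a shape/scale factory, plus Marsaglia-Tsang sampling. A minimal sketch; the WithShapeScale call corresponds to the "shape and scale parameter" constructor documented above and its exact name should be treated as an assumption.

using System;
using MathNet.Numerics.Distributions;

static class GammaSketch
{
    static void Main()
    {
        var gamma = new Gamma(2.0, 1.5);                      // shape k = 2, rate beta = 1.5
        Console.WriteLine(gamma.Mean);                        // k / beta = 1.333...
        Console.WriteLine(gamma.Density(1.0));                // PDF at x = 1
        Console.WriteLine(gamma.CumulativeDistribution(1.0)); // CDF at x = 1
        Console.WriteLine(gamma.InverseCumulativeDistribution(0.5)); // median
        double s = gamma.Sample();                            // Marsaglia-Tsang draw

        // Same distribution specified via scale theta = 1/rate (assumed factory name).
        var same = Gamma.WithShapeScale(2.0, 1.0 / 1.5);
        Console.WriteLine(same.Variance);                     // k * theta^2
    }
}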
+ + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Returns one sample from the distribution. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + One sample from the distribution implied by . + + + + Samples a Geometric distributed random variable. + + A sample from the Geometric distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Geometric distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The probability (p) of generating one. Range: 0 ≤ p ≤ 1. + + + + Discrete Univariate Hypergeometric distribution. + This distribution is a discrete probability distribution that describes the number of successes in a sequence + of n draws from a finite population without replacement, just as the binomial distribution + describes the number of successes for draws with replacement + Wikipedia - Hypergeometric distribution. + + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Initializes a new instance of the Hypergeometric class. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the size of the population (N). + + + + + Gets the number of draws without replacement (n). + + + + + Gets the number successes within the population (K, M). 
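The geometric entries above note that this implementation counts the trial on which the first success occurs, so it never returns 0. A small sketch with p = 0.25 chosen arbitrarily.

using System;
using MathNet.Numerics.Distributions;

static class GeometricSketch
{
    static void Main()
    {
        var geo = new Geometric(0.25);                      // success probability p
        Console.WriteLine(geo.Probability(1));              // P(X = 1) = p (support starts at 1)
        Console.WriteLine(geo.Mean);                        // 1 / p = 4
        Console.WriteLine(geo.CumulativeDistribution(4.0)); // P(X <= 4) = 1 - (1 - p)^4
        int trials = geo.Sample();                          // trials until the first success
    }
}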
+ + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + the cumulative distribution at location . + + + + + Generates a sample from the Hypergeometric distribution without doing parameter checking. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The n parameter of the distribution. + a random number from the Hypergeometric distribution. + + + + Samples a Hypergeometric distributed random variable. + + The number of successes in n trials. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Hypergeometric distributed random variables. + + a sequence of successes in n trials. + + + + Samples a random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The random number generator to use. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Samples a random variable. + + The size of the population (N). + The number successes within the population (K, M). 
+ The number of draws without replacement (n). + + + + Samples a sequence of this random variable. + + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The size of the population (N). + The number successes within the population (K, M). + The number of draws without replacement (n). + + + + Continuous Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by a double. + + + + + Gets the largest element in the domain of the distribution which can be represented by a double. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Discrete Univariate Probability Distribution. + + + + + + Gets the mode of the distribution. + + + + + Gets the smallest element in the domain of the distribution which can be represented by an integer. + + + + + Gets the largest element in the domain of the distribution which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Draws a sequence of random samples from the distribution. + + an infinite sequence of samples from the distribution. + + + + Probability Distribution. + + + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Continuous Univariate Inverse Gamma distribution. + The inverse Gamma distribution is a distribution over the positive real numbers parameterized by + two positive parameters. + Wikipedia - InverseGamma distribution. + + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Initializes a new instance of the class. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + + + + Gets or sets the shape (α) parameter. Range: α > 0. 
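The hypergeometric entries above take the population size N, the number of successes K within the population, and the number of draws n, in that order. A sketch drawing 10 items from a batch of 50 containing 5 defects; the values are illustrative.

using System;
using MathNet.Numerics.Distributions;

static class HypergeometricSketch
{
    static void Main()
    {
        var hyper = new Hypergeometric(50, 5, 10);           // N = 50, K = 5, n = 10
        Console.WriteLine(hyper.Mean);                       // n * K / N = 1.0
        Console.WriteLine(hyper.Probability(0));             // chance of drawing no defects
        Console.WriteLine(1.0 - hyper.CumulativeDistribution(1.0)); // P(more than one defect)
        int defects = hyper.Sample();                        // defects found in one run of draws
    }
}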
+ + + + + Gets or sets The scale (β) parameter. Range: β > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Throws . + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Cauchy distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. 
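The inverse Gamma members above mirror the Gamma ones but use a shape/scale parameterization. A sketch with alpha kept above 1 so the mean beta / (alpha - 1) is finite; the parameter values are illustrative.

using System;
using MathNet.Numerics.Distributions;

static class InverseGammaSketch
{
    static void Main()
    {
        var invGamma = new InverseGamma(3.0, 2.0);            // shape alpha = 3, scale beta = 2
        Console.WriteLine(invGamma.Mean);                     // beta / (alpha - 1) = 1.0
        Console.WriteLine(invGamma.Density(1.0));             // PDF at x = 1
        Console.WriteLine(invGamma.CumulativeDistribution(1.0)); // CDF at x = 1
        double draw = invGamma.Sample();
    }
}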
+ + The array to fill with the samples. + The shape (α) of the distribution. Range: α > 0. + The scale (β) of the distribution. Range: β > 0. + a sequence of samples from the distribution. + + + + Multivariate Inverse Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The inverse Wishart distribution + is the conjugate prior for the covariance matrix of a multivariate normal distribution. + Wikipedia - Inverse-Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Initializes a new instance of the class. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + + + + Gets or sets the degree of freedom (ν) for the inverse Wishart distribution. + + + + + Gets or sets the scale matrix (Ψ) for the inverse Wishart distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + A. O'Hagan, and J. J. Forster (2004). Kendall's Advanced Theory of Statistics: Bayesian Inference. 2B (2 ed.). Arnold. ISBN 0-340-80752-0. + + + + Gets the variance of the distribution. + + The variance of the distribution. + Kanti V. Mardia, J. T. Kent and J. M. Bibby (1979). Multivariate Analysis. + + + + Evaluates the probability density function for the inverse Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + a sample from the distribution. + + + + Samples an inverse Wishart distributed random variable by sampling + a Wishart random variable and inverting the matrix. + + The random number generator to use. + The degree of freedom (ν) for the inverse Wishart distribution. + The scale matrix (Ψ) for the inverse Wishart distribution. + a sample from the distribution. + + + + Univariate Probability Distribution. + + + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the median of the distribution. + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Continuous Univariate Laplace distribution. + The Laplace distribution is a distribution over the real numbers parameterized by a mean and + scale parameter. The PDF is: + p(x) = \frac{1}{2 * scale} \exp{- |x - mean| / scale}. 
+ Wikipedia - Laplace distribution. + + + + + Initializes a new instance of the class (location = 0, scale = 1). + + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + If is negative. + + + + Initializes a new instance of the class. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + + + + Gets the location (μ) of the Laplace distribution. + + + + + Gets the scale (b) of the Laplace distribution. Range: b > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Samples a Laplace distributed random variable. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sample from the Laplace distribution. + + a sample from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. 
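The Laplace members above follow directly from the PDF quoted in the class summary, p(x) = exp(-|x - mu| / b) / (2b). A sketch using the default standard Laplace (location 0, scale 1, per the docs) alongside an explicitly parameterized one.

using System;
using MathNet.Numerics.Distributions;

static class LaplaceSketch
{
    static void Main()
    {
        var standard = new Laplace();                         // location 0, scale 1
        Console.WriteLine(standard.Density(0.0));             // peak density = 1 / (2 * 1) = 0.5
        Console.WriteLine(standard.CumulativeDistribution(0.0)); // 0.5 at the location

        var shifted = new Laplace(2.0, 0.5);                  // location mu = 2, scale b = 0.5
        Console.WriteLine(shifted.Variance);                  // 2 * b^2 = 0.5
        double noise = shifted.Sample();
    }
}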
+ + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (b) of the distribution. Range: b > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Log-Normal distribution. + For details about this distribution, see + Wikipedia - Log-Normal distribution. + + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the logarithm of the distribution. + The shape (σ) of the logarithm of the distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the class. + The distribution will be initialized with the default + random number generator. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a log-normal distribution with the desired mu and sigma parameters. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Constructs a log-normal distribution with the desired mean and variance. + + The mean of the log-normal distribution. + The variance of the log-normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + + + + Estimates the log-normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A log-normal distribution. + MATLAB: lognfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + + + + Gets the log-scale (μ) (mean of the logarithm) of the distribution. + + + + + Gets the shape (σ) (standard deviation of the logarithm) of the distribution. Range: σ ≥ 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mu of the log-normal distribution. + + + + + Gets the variance of the log-normal distribution. + + + + + Gets the standard deviation of the log-normal distribution. + + + + + Gets the entropy of the log-normal distribution. + + + + + Gets the skewness of the log-normal distribution. + + + + + Gets the mode of the log-normal distribution. + + + + + Gets the median of the log-normal distribution. + + + + + Gets the minimum of the log-normal distribution. 
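The log-normal entries above offer three ways in: the mu/sigma constructor, a mean/variance factory, and maximum-likelihood estimation from samples. A sketch; WithMeanVariance and Estimate are the factory and estimator described above, and their exact signatures should be checked against the referenced MathNet.Numerics assembly.

using System;
using MathNet.Numerics.Distributions;

static class LogNormalSketch
{
    static void Main()
    {
        var byLogScale = new LogNormal(0.0, 0.5);              // mu = 0, sigma = 0.5 (log scale)
        Console.WriteLine(byLogScale.Median);                  // exp(mu) = 1
        Console.WriteLine(byLogScale.CumulativeDistribution(1.0)); // 0.5 at the median

        var byMoments = LogNormal.WithMeanVariance(10.0, 4.0); // target mean 10, variance 4
        Console.WriteLine(byMoments.Mean);                     // approximately 10

        var data = new[] { 1.1, 0.8, 1.4, 2.2, 0.9, 1.7 };     // illustrative sample
        var fitted = LogNormal.Estimate(data);                 // MLE fit (MATLAB: lognfit)
        Console.WriteLine(fitted.Mu + " " + fitted.Sigma);
    }
}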
+ + + + + Gets the maximum of the log-normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the density at . + + MATLAB: lognpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: logncdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: logninv + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. 
Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the log-normal distribution using the Box-Muller algorithm. + + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The log-scale (μ) of the distribution. + The shape (σ) of the distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Multivariate Matrix-valued Normal distributions. The distribution + is parameterized by a mean matrix (M), a covariance matrix for the rows (V) and a covariance matrix + for the columns (K). If the dimension of M is d-by-m then V is d-by-d and K is m-by-m. + Wikipedia - MatrixNormal distribution. + + + + + The mean of the matrix normal distribution. + + + + + The covariance matrix for the rows. + + + + + The covariance matrix for the columns. + + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + + + + Initializes a new instance of the class. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + The random number generator which is used to draw random samples. + If the dimensions of the mean and two covariance matrices don't match. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + + + + Gets the mean. (M) + + The mean of the distribution. + + + + Gets the row covariance. (V) + + The row covariance. + + + + Gets the column covariance. (K) + + The column covariance. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Evaluates the probability density function for the matrix normal distribution. + + The matrix at which to evaluate the density at. + the density at + If the argument does not have the correct dimensions. + + + + Samples a matrix normal distributed random variable. + + A random number from this distribution. + + + + Samples a matrix normal distributed random variable. + + The random number generator to use. + The mean of the matrix normal. + The covariance matrix for the rows. + The covariance matrix for the columns. + If the dimensions of the mean and two covariance matrices don't match. + a sequence of samples from the distribution. + + + + Samples a vector normal distributed random variable. + + The random number generator to use. + The mean of the vector normal distribution. + The covariance matrix of the vector normal distribution. + a sequence of samples from defined distribution. + + + + Multivariate Multinomial distribution. For details about this distribution, see + Wikipedia - Multinomial distribution. + + + The distribution is parameterized by a vector of ratios: in other words, the parameter + does not have to be normalized and sum to 1. The reason is that some vectors can't be exactly normalized + to sum to 1 in floating point representation. + + + + + Stores the normalized multinomial probabilities. + + + + + The number of trials. + + + + + Initializes a new instance of the Multinomial class. 
+ + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + The random number generator which is used to draw random samples. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + Initializes a new instance of the Multinomial class from histogram . The distribution will + not be automatically updated when the histogram changes. + + Histogram instance + The number of trials. + If any of the probabilities are negative or do not sum to one. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + If any of the probabilities are negative returns false, + if the sum of parameters is 0.0, or if the number of trials is negative; otherwise true. + + + + Gets the proportion of ratios. + + + + + Gets the number of trials. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Computes values of the probability mass function. + + Non-negative integers x1, ..., xk + The probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Computes values of the log probability mass function. + + Non-negative integers x1, ..., xk + The log probability mass at location . + When is null. + When length of is not equal to event probabilities count. + + + + Samples one multinomial distributed random variable. + + the counts for each of the different possible values. + + + + Samples a sequence multinomially distributed random variables. + + a sequence of counts for each of the different possible values. + + + + Samples one multinomial distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of trials. + the counts for each of the different possible values. + + + + Samples a multinomially distributed random variable. + + The random number generator to use. + An array of nonnegative ratios: this array does not need to be normalized + as this is often impossible using floating point arithmetic. + The number of variables needed. + a sequence of counts for each of the different possible values. + + + + Discrete Univariate Negative Binomial distribution. + The negative binomial is a distribution over the natural numbers with two parameters r, p. For the special + case that r is an integer one can interpret the distribution as the number of failures before the r'th success + when the probability of success is p. + Wikipedia - NegativeBinomial distribution. + + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. 
Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Initializes a new instance of the class. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + The random number generator which is used to draw random samples. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Gets the number of successes. Range: r ≥ 0. + + + + + Gets the probability of success. Range: 0 ≤ p ≤ 1. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + the cumulative distribution at location . + + + + + Samples a negative binomial distributed random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + a sample from the distribution. + + + + Samples a NegativeBinomial distributed random variable. + + a sample from the distribution. 
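A short sketch of the negative-binomial members listed above (instance PMF/CDF plus their static overloads), again assuming the MathNet.Numerics 3.x API; the parameter values are arbitrary:

    using System;
    using MathNet.Numerics.Distributions;

    static class NegativeBinomialSketch
    {
        static void Main()
        {
            // r = 5 successes required, p = 0.4 probability of success per trial.
            var nb = new NegativeBinomial(5.0, 0.4);

            Console.WriteLine("mean = {0}, variance = {1}", nb.Mean, nb.Variance);
            Console.WriteLine("P(X = 3)  = {0}", nb.Probability(3));             // PMF at k = 3
            Console.WriteLine("P(X <= 3) = {0}", nb.CumulativeDistribution(3.0));

            // Static equivalents, useful when no distribution object is kept around.
            Console.WriteLine("PMF = {0}", NegativeBinomial.PMF(5.0, 0.4, 3));
            Console.WriteLine("CDF = {0}", NegativeBinomial.CDF(5.0, 0.4, 3.0));

            // One random count of failures observed before the 5th success.
            Console.WriteLine("sample = {0}", nb.Sample());
        }
    }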
+ + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of NegativeBinomial distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Samples a sequence of this random variable. + + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The number of successes (r) required to stop the experiment. Range: r ≥ 0. + The probability (p) of a trial resulting in success. Range: 0 ≤ p ≤ 1. + + + + Continuous Univariate Normal distribution, also known as Gaussian distribution. + For details about this distribution, see + Wikipedia - Normal distribution. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + + + + Initializes a new instance of the Normal class. This is a normal distribution with mean 0.0 + and standard deviation 1.0. The distribution will + be initialized with the default random number generator. + + The random number generator which is used to draw random samples. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Initializes a new instance of the Normal class with a particular mean and standard deviation. The distribution will + be initialized with the default random number generator. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. + + + + Constructs a normal distribution from a mean and standard deviation. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The random number generator which is used to draw random samples. Optional, can be null. + a normal distribution. + + + + Constructs a normal distribution from a mean and variance. + + The mean (μ) of the normal distribution. + The variance (σ^2) of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. 
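The WithMeanStdDev/WithMeanVariance factories just documented differ from the plain constructor only in how σ is supplied; a brief sketch under the same MathNet.Numerics 3.x assumption:

    using System;
    using MathNet.Numerics.Distributions;

    static class NormalSketch
    {
        static void Main()
        {
            // Three equivalent ways of building N(mean = 10, sigma = 2):
            var a = new Normal(10.0, 2.0);
            var b = Normal.WithMeanStdDev(10.0, 2.0);
            var c = Normal.WithMeanVariance(10.0, 4.0);   // sigma^2 = 4

            Console.WriteLine("{0} / {1} / {2}", a.StdDev, b.StdDev, c.StdDev); // all 2

            // Passing an explicit System.Random makes the samples reproducible.
            var seeded = new Normal(10.0, 2.0, new Random(42));
            Console.WriteLine("sample: {0}", seeded.Sample());
        }
    }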
+ + + + Constructs a normal distribution from a mean and precision. + + The mean (μ) of the normal distribution. + The precision of the normal distribution. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + + + + Estimates the normal distribution parameters from sample data with maximum-likelihood. + + The samples to estimate the distribution parameters from. + The random number generator which is used to draw random samples. Optional, can be null. + A normal distribution. + MATLAB: normfit + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + Gets the mean (μ) of the normal distribution. + + + + + Gets the standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + + + + + Gets the variance of the normal distribution. + + + + + Gets the precision of the normal distribution. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the entropy of the normal distribution. + + + + + Gets the skewness of the normal distribution. + + + + + Gets the mode of the normal distribution. + + + + + Gets the median of the normal distribution. + + + + + Gets the minimum of the normal distribution. + + + + + Gets the maximum of the normal distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the density at . + + MATLAB: normpdf + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The mean (μ) of the normal distribution. 
+ The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the cumulative distribution at location . + + MATLAB: normcdf + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + the inverse cumulative density at . + + MATLAB: norminv + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The random number generator to use. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Generates a sample from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sample from the distribution. + + + + Generates a sequence of samples from the normal distribution using the Box-Muller algorithm. + + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The mean (μ) of the normal distribution. + The standard deviation (σ) of the normal distribution. Range: σ ≥ 0. + a sequence of samples from the distribution. + + + + This structure represents the type over which the distribution + is defined. + + + + + The mean value. + + + + + The precision value. + + + + + Initializes a new instance of the struct. + + The mean of the pair. + The precision of the pair. + + + + Gets or sets the mean of the pair. + + + + + Gets or sets the precision of the pair. + + + + + Multivariate Normal-Gamma Distribution. + The distribution is the conjugate prior distribution for the + distribution. It specifies a prior over the mean and precision of the distribution. + It is parameterized by four numbers: the mean location, the mean scale, the precision shape and the + precision inverse scale. + The distribution NG(mu, tau | mloc,mscale,psscale,pinvscale) = Normal(mu | mloc, 1/(mscale*tau)) * Gamma(tau | psscale,pinvscale). + The following degenerate cases are special: when the precision is known, + the precision shape will encode the value of the precision while the precision inverse scale is positive + infinity. When the mean is known, the mean location will encode the value of the mean while the scale + will be positive infinity. A completely degenerate NormalGamma distribution with known mean and precision is possible as well. + Wikipedia - Normal-Gamma distribution. + + + + + Initializes a new instance of the class. 
+ + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Initializes a new instance of the class. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + + + + Gets the location of the mean. + + + + + Gets the scale of the mean. + + + + + Gets the shape of the precision. + + + + + Gets the inverse scale of the precision. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Returns the marginal distribution for the mean of the NormalGamma distribution. + + the marginal distribution for the mean of the NormalGamma distribution. + + + + Returns the marginal distribution for the precision of the distribution. + + The marginal distribution for the precision of the distribution/ + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the variance of the distribution. + + The mean of the distribution. + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + Density value + + + + Evaluates the probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + Density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean/precision pair of the distribution + The log of the density value + + + + Evaluates the log probability density function for a NormalGamma distribution. + + The mean of the distribution + The precision of the distribution + The log of the density value + + + + Generates a sample from the NormalGamma distribution. + + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + a sequence of samples from the distribution. + + + + Generates a sample from the NormalGamma distribution. + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sample from the distribution. + + + + Generates a sequence of samples from the NormalGamma distribution + + The random number generator to use. + The location of the mean. + The scale of the mean. + The shape of the precision. + The inverse scale of the precision. + a sequence of samples from the distribution. + + + + Continuous Univariate Pareto distribution. + The Pareto distribution is a power law probability distribution that coincides with social, + scientific, geophysical, actuarial, and many other types of observable phenomena. + For details about this distribution, see + Wikipedia - Pareto distribution. + + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + If or are negative. + + + + Initializes a new instance of the class. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The random number generator which is used to draw random samples. 
+ If or are negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + + + + Gets the scale (xm) of the distribution. Range: xm > 0. + + + + + Gets the shape (α) of the distribution. Range: α > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Pareto distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. 
+ The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (xm) of the distribution. Range: xm > 0. + The shape (α) of the distribution. Range: α > 0. + a sequence of samples from the distribution. + + + + Discrete Univariate Poisson distribution. + + + Distribution is described at Wikipedia - Poisson distribution. + Knuth's method is used to generate Poisson distributed random variables. + f(x) = exp(-λ)*λ^x/x!; + + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + If is equal or less then 0.0. + + + + Initializes a new instance of the class. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + If is equal or less then 0.0. + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Tests whether the provided values are valid parameters for this distribution. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + + + + Gets the Poisson distribution parameter λ. Range: λ > 0. + + + + + Gets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Gets the skewness of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + Approximation, see Wikipedia Poisson distribution + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. 
+ the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Generates one sample from the Poisson distribution. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by Knuth's method. + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + + + + Generates one sample from the Poisson distribution by "Rejection method PA". + + The random source to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A random sample from the Poisson distribution. + "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson, + Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) + The article is on pages 29-35. The algorithm given here is on page 32. + + + + Samples a Poisson distributed random variable. + + A sample from the Poisson distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of Poisson distributed random variables. + + a sequence of successes in N trials. + + + + Samples a Poisson distributed random variable. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The random number generator to use. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Samples a Poisson distributed random variable. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + A sample from the Poisson distribution. + + + + Samples a sequence of Poisson distributed random variables. + + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The lambda (λ) parameter of the Poisson distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Rayleigh distribution. + The Rayleigh distribution (pronounced /ˈreɪli/) is a continuous probability distribution. 
As an + example of how it arises, the wind speed will have a Rayleigh distribution if the components of + the two-dimensional wind velocity vector are uncorrelated and normally distributed with equal variance. + For details about this distribution, see + Wikipedia - Rayleigh distribution. + + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + If is negative. + + + + Initializes a new instance of the class. + + The scale (σ) of the distribution. Range: σ > 0. + The random number generator which is used to draw random samples. + If is negative. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The scale (σ) of the distribution. Range: σ > 0. + + + + Gets the scale (σ) of the distribution. Range: σ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Rayleigh distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The scale (σ) of the distribution. Range: σ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The scale (σ) of the distribution. Range: σ > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. 
+ The scale (σ) of the distribution. Range: σ > 0. + the inverse cumulative density at . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The scale (σ) of the distribution. Range: σ > 0. + a sequence of samples from the distribution. + + + + Continuous Univariate Stable distribution. + A random variable is said to be stable (or to have a stable distribution) if it has + the property that a linear combination of two independent copies of the variable has + the same distribution, up to location and scale parameters. + For details about this distribution, see + Wikipedia - Stable distribution. + + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Initializes a new instance of the class. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + + + + Gets the stability (α) of the distribution. Range: 2 ≥ α > 0. + + + + + Gets The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + + + + + Gets the scale (c) of the distribution. Range: c > 0. + + + + + Gets the location (μ) of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets he entropy of the distribution. + + Always throws a not supported exception. + + + + Gets the skewness of the distribution. + + Throws a not supported exception of Alpha != 2. + + + + Gets the mode of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the median of the distribution. + + Throws a not supported exception if Beta != 0. + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. 
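As the property notes above indicate, several Stable moments are only defined for particular parameter combinations; the closed-form special cases are α = 2 (Gaussian shape), α = 1, β = 0 (Cauchy) and α = 0.5, β = 1 (Lévy). A small sketch of constructing these cases, assuming the MathNet.Numerics 3.x API and the constructor order documented above (stability, skewness, scale, location):

    using System;
    using MathNet.Numerics.Distributions;

    static class StableSpecialCases
    {
        static void Main()
        {
            // alpha = 2: Gaussian shape, so the variance (2*c*c) is finite.
            var gaussianCase = new Stable(2.0, 0.0, 1.0, 0.0);
            Console.WriteLine("alpha = 2 variance: {0}", gaussianCase.Variance);

            // alpha = 1, beta = 0: Cauchy. Mode and Median are available
            // because beta = 0 (see the property notes above).
            var cauchyCase = new Stable(1.0, 0.0, 1.0, 0.0);
            Console.WriteLine("Cauchy-case median: {0}", cauchyCase.Median);

            // alpha = 0.5, beta = 1: Levy. Mode/Median would throw here,
            // since beta != 0, but sampling still works.
            var levyCase = new Stable(0.5, 1.0, 1.0, 0.0);
            Console.WriteLine("Levy-case sample: {0}", levyCase.Sample());
        }
    }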
+ + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + Throws a not supported exception if Alpha != 2, (Alpha != 1 and Beta !=0), or (Alpha != 0.5 and Beta != 1) + + + + Samples the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a random number from the distribution. + + + + Draws a random sample from the distribution. + + A random number from this distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Stable distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The random number generator to use. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. 
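For parameter combinations outside those special cases the CDF overloads above are documented to throw, but sampling is available for any valid (α, β, c, μ). A sketch of the static sampling overloads, with arbitrary parameters and the MathNet.Numerics 3.x API assumed:

    using System;
    using System.Linq;
    using MathNet.Numerics.Distributions;

    static class StableSamplingSketch
    {
        static void Main()
        {
            var rng = new Random(7);

            // General parameters: alpha = 1.3, beta = 0.5, scale = 2, location = -1.
            // No closed-form CDF applies here, but Sample/Samples still work.
            double one = Stable.Sample(rng, 1.3, 0.5, 2.0, -1.0);
            double[] many = Stable.Samples(rng, 1.3, 0.5, 2.0, -1.0).Take(5).ToArray();

            Console.WriteLine("single draw: {0}", one);
            Console.WriteLine("five draws:  {0}", string.Join(", ", many));
        }
    }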
+ + + + Generates a sample from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sample from the distribution. + + + + Generates a sequence of samples from the distribution. + + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The stability (α) of the distribution. Range: 2 ≥ α > 0. + The skewness (β) of the distribution. Range: 1 ≥ β ≥ -1. + The scale (c) of the distribution. Range: c > 0. + The location (μ) of the distribution. + a sequence of samples from the distribution. + + + + Continuous Univariate Student's T-distribution. + Implements the univariate Student t-distribution. For details about this + distribution, see + + Wikipedia - Student's t-distribution. + + We use a slightly generalized version (compared to + Wikipedia) of the Student t-distribution. Namely, one which also + parameterizes the location and scale. See the book "Bayesian Data + Analysis" by Gelman et al. for more details. + The density of the Student t-distribution p(x|mu,scale,dof) = + Gamma((dof+1)/2) (1 + (x - mu)^2 / (scale * scale * dof))^(-(dof+1)/2) / + (Gamma(dof/2)*Sqrt(dof*pi*scale)). + The distribution will use the by + default. Users can get/set the random number generator by using the + property. + The statistics classes will check all the incoming parameters + whether they are in the allowed range. This might involve heavy + computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the StudentT class. This is a Student t-distribution with location 0.0 + scale 1.0 and degrees of freedom 1. + + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Initializes a new instance of the StudentT class with a particular location, scale and degrees of + freedom. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + + + + Gets the location (μ) of the Student t-distribution. + + + + + Gets the scale (σ) of the Student t-distribution. Range: σ > 0. + + + + + Gets the degrees of freedom (ν) of the Student t-distribution. Range: ν > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Student t-distribution. + + + + + Gets the variance of the Student t-distribution. 
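A sketch of the Student t-distribution as parameterized above (location μ, scale σ, degrees of freedom ν), with arbitrary example values and the MathNet.Numerics 3.x API assumed:

    using System;
    using MathNet.Numerics.Distributions;

    static class StudentTSketch
    {
        static void Main()
        {
            // Standard t with 1 degree of freedom (location 0, scale 1)...
            var standard = new StudentT();

            // ...and the generalized form: location 5, scale 2, 10 degrees of freedom.
            var shifted = new StudentT(5.0, 2.0, 10.0);

            Console.WriteLine("density at 5: {0}", shifted.Density(5.0));
            Console.WriteLine("P(X <= 5):    {0}", shifted.CumulativeDistribution(5.0)); // 0.5 by symmetry

            // Static CDF overload with explicit location/scale/freedom parameters.
            Console.WriteLine("standard P(X <= 0): {0}", StudentT.CDF(0.0, 1.0, 1.0, 0.0));
        }
    }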
+ + + + + Gets the standard deviation of the Student t-distribution. + + + + + Gets the entropy of the Student t-distribution. + + + + + Gets the skewness of the Student t-distribution. + + + + + Gets the mode of the Student t-distribution. + + + + + Gets the median of the Student t-distribution. + + + + + Gets the minimum of the Student t-distribution. + + + + + Gets the maximum of the Student t-distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Samples student-t distributed random variables. + + The algorithm is method 2 in section 5, chapter 9 + in L. Devroye's "Non-Uniform Random Variate Generation" + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a random number from the standard student-t distribution. + + + + Generates a sample from the Student t-distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Student t-distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + the inverse cumulative density at . 
+ + WARNING: currently not an explicit implementation, hence slow and unreliable. + + + + Generates a sample from the Student t-distribution. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The random number generator to use. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Student t-distribution. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Student t-distribution using the Box-Muller algorithm. + + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The location (μ) of the distribution. + The scale (σ) of the distribution. Range: σ > 0. + The degrees of freedom (ν) for the distribution. Range: ν > 0. + a sequence of samples from the distribution. + + + + Triangular distribution. + For details, see Wikipedia - Triangular distribution. + + The distribution will use the by default. + Users can get/set the random number generator by using the property. + The statistics classes will check whether all the incoming parameters are in the allowed range. This might involve heavy computation. Optionally, by setting Control.CheckDistributionParameters + to false, all parameter checks can be turned off. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + Initializes a new instance of the Triangular class with the given lower bound, upper bound and mode. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The random number generator which is used to draw random samples. + If the upper bound is smaller than the mode or if the mode is smaller than the lower bound. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + + + + Gets the lower bound of the distribution. 
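The Triangular constructor just documented takes the two bounds and the mode; a minimal sketch with arbitrary values, assuming the MathNet.Numerics 3.x API:

    using System;
    using MathNet.Numerics.Distributions;

    static class TriangularSketch
    {
        static void Main()
        {
            // lower = 0, upper = 10, mode (most frequent value) = 3.
            var tri = new Triangular(0.0, 10.0, 3.0);

            Console.WriteLine("mean:      {0}", tri.Mean);                         // (0 + 10 + 3) / 3
            Console.WriteLine("P(X <= 3): {0}", tri.CumulativeDistribution(3.0));  // (3 - 0) / (10 - 0) at the mode
            Console.WriteLine("sample:    {0}", tri.Sample());

            // A mode outside [lower, upper] is rejected by the parameter
            // checks described above (Control.CheckDistributionParameters).
        }
    }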
+ + + + + Gets the upper bound of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + + Gets the skewness of the distribution. + + + + + Gets or sets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + + Gets the minimum of the distribution. + + + + + Gets the maximum of the distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The location at which to compute the log density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Triangular distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the cumulative distribution at location . + + + + + Computes the inverse of the cumulative distribution function (InvCDF) for the distribution + at the given probability. This is also known as the quantile or percent point function. + + The location at which to compute the inverse cumulative density. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + the inverse cumulative density at . + + + + + Generates a sample from the Triangular distribution. + + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. 
+ + The random number generator to use. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Generates a sample from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sample from the distribution. + + + + Generates a sequence of samples from the Triangular distribution. + + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + Lower bound. Range: lower ≤ mode ≤ upper + Upper bound. Range: lower ≤ mode ≤ upper + Mode (most frequent value). Range: lower ≤ mode ≤ upper + a sequence of samples from the distribution. + + + + Continuous Univariate Weibull distribution. + For details about this distribution, see + Wikipedia - Weibull distribution. + + + The Weibull distribution is parametrized by a shape and scale parameter. + + + + + Reusable intermediate result 1 / (_scale ^ _shape) + + + By caching this parameter we can get slightly better numerics precision + in certain constellations without any additional computations. + + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Initializes a new instance of the Weibull class. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + + + + Gets the shape (k) of the Weibull distribution. Range: k > 0. + + + + + Gets the scale (λ) of the Weibull distribution. Range: λ > 0. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the Weibull distribution. + + + + + Gets the variance of the Weibull distribution. + + + + + Gets the standard deviation of the Weibull distribution. + + + + + Gets the entropy of the Weibull distribution. + + + + + Gets the skewness of the Weibull distribution. + + + + + Gets the mode of the Weibull distribution. + + + + + Gets the median of the Weibull distribution. + + + + + Gets the minimum of the Weibull distribution. + + + + + Gets the maximum of the Weibull distribution. + + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The location at which to compute the density. + the density at . + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). 
+ + The location at which to compute the log density. + the log density at . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Generates a sample from the Weibull distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Generates a sequence of samples from the Weibull distribution. + + a sequence of samples from the distribution. + + + + Computes the probability density of the distribution (PDF) at x, i.e. ∂P(X ≤ x)/∂x. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the density at . + + + + + Computes the log probability density of the distribution (lnPDF) at x, i.e. ln(∂P(X ≤ x)/∂x). + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + The location at which to compute the density. + the log density at . + + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + the cumulative distribution at location . + + + + + Implemented according to: Parameter estimation of the Weibull probability distribution, 1994, Hongzhu Qiao, Chris P. Tsokos + + + + Returns a Weibull distribution. + + + + Generates a sample from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The random number generator to use. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Generates a sample from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sample from the distribution. + + + + Generates a sequence of samples from the Weibull distribution. + + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The shape (k) of the Weibull distribution. Range: k > 0. + The scale (λ) of the Weibull distribution. Range: λ > 0. + a sequence of samples from the distribution. + + + + Multivariate Wishart distribution. This distribution is + parameterized by the degrees of freedom nu and the scale matrix S. The Wishart distribution + is the conjugate prior for the precision (inverse covariance) matrix of the multivariate + normal distribution. + Wikipedia - Wishart distribution. 
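A small sketch of the Weibull members documented above (just before the Wishart entries); the shape/scale values and evaluation point are illustrative:

    using MathNet.Numerics.Distributions;

    // Weibull with shape k = 1.5 and scale lambda = 2.0.
    var weibull = new Weibull(1.5, 2.0);
    double pdf  = weibull.Density(1.0);                // PDF at x = 1
    double cdf  = weibull.CumulativeDistribution(1.0); // P(X <= 1)
    double draw = weibull.Sample();                    // one random draw

    // Equivalent static form of the density, without constructing an instance.
    double samePdf = Weibull.PDF(1.5, 2.0, 1.0);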
+ + + + + The degrees of freedom for the Wishart distribution. + + + + + The scale matrix for the Wishart distribution. + + + + + Caches the Cholesky factorization of the scale matrix. + + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Initializes a new instance of the class. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The random number generator which is used to draw random samples. + + + + Tests whether the provided values are valid parameters for this distribution. + + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + + + + Gets or sets the degrees of freedom (n) for the Wishart distribution. + + + + + Gets or sets the scale matrix (V) for the Wishart distribution. + + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + The mean of the distribution. + + + + Gets the mode of the distribution. + + The mode of the distribution. + + + + Gets the variance of the distribution. + + The variance of the distribution. + + + + Evaluates the probability density function for the Wishart distribution. + + The matrix at which to evaluate the density at. + If the argument does not have the same dimensions as the scale matrix. + the density at . + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + A random number from this distribution. + + + + Samples a Wishart distributed random variable using the method + Algorithm AS 53: Wishart Variate Generator + W. B. Smith and R. R. Hocking + Applied Statistics, Vol. 21, No. 3 (1972), pp. 341-345 + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + a sequence of samples from the distribution. + + + + Samples the distribution. + + The random number generator to use. + The degrees of freedom (n) for the Wishart distribution. + The scale matrix (V) for the Wishart distribution. + The cholesky decomposition to use. + a random number from the distribution. + + + + Discrete Univariate Zipf distribution. + Zipf's law, an empirical law formulated using mathematical statistics, refers to the fact + that many types of data studied in the physical and social sciences can be approximated with + a Zipfian distribution, one of a family of related discrete power law probability distributions. + For details about this distribution, see + Wikipedia - Zipf distribution. + + + + + The s parameter of the distribution. + + + + + The n parameter of the distribution. + + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Initializes a new instance of the class. + + The s parameter of the distribution. + The n parameter of the distribution. + The random number generator which is used to draw random samples. + + + + A string representation of the distribution. + + a string representation of the distribution. + + + + Tests whether the provided values are valid parameters for this distribution. 
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Gets or sets the s parameter of the distribution. + + + + + Gets or sets the n parameter of the distribution. + + + + + Gets or sets the random number generator which is used to draw random samples. + + + + + Gets the mean of the distribution. + + + + + Gets the variance of the distribution. + + + + + Gets the standard deviation of the distribution. + + + + + Gets the entropy of the distribution. + + + + + Gets the skewness of the distribution. + + + + + Gets the mode of the distribution. + + + + + Gets the median of the distribution. + + + + + Gets the smallest element in the domain of the distributions which can be represented by an integer. + + + + + Gets the largest element in the domain of the distributions which can be represented by an integer. + + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + the cumulative distribution at location . + + + + Computes the probability mass (PMF) at k, i.e. P(X = k). + + The location in the domain where we want to evaluate the probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the probability mass at location . + + + + Computes the log probability mass (lnPMF) at k, i.e. ln(P(X = k)). + + The location in the domain where we want to evaluate the log probability mass function. + The s parameter of the distribution. + The n parameter of the distribution. + the log probability mass at location . + + + + Computes the cumulative distribution (CDF) of the distribution at x, i.e. P(X ≤ x). + + The location at which to compute the cumulative distribution function. + The s parameter of the distribution. + The n parameter of the distribution. + the cumulative distribution at location . + + + + + Generates a sample from the Zipf distribution without doing parameter checking. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + a random number from the Zipf distribution. + + + + Draws a random sample from the distribution. + + a sample from the distribution. + + + + Fills an array with samples generated from the distribution. + + + + + Samples an array of zipf distributed random variables. + + a sequence of samples from the distribution. + + + + Samples a random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. + + The random number generator to use. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The random number generator to use. + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a random variable. + + The s parameter of the distribution. + The n parameter of the distribution. + + + + Samples a sequence of this random variable. 
+ + The s parameter of the distribution. + The n parameter of the distribution. + + + + Fills an array with samples generated from the distribution. + + The array to fill with the samples. + The s parameter of the distribution. + The n parameter of the distribution. + + + + Integer number theory functions. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Canonical Modulus. The result has the sign of the divisor. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Remainder (% operator). The result has the sign of the dividend. + + + + + Find out whether the provided 32 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 64 bit integer is an even number. + + The number to very whether it's even. + True if and only if it is an even number. + + + + Find out whether the provided 32 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 64 bit integer is an odd number. + + The number to very whether it's odd. + True if and only if it is an odd number. + + + + Find out whether the provided 32 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 64 bit integer is a perfect power of two. + + The number to very whether it's a power of two. + True if and only if it is a power of two. + + + + Find out whether the provided 32 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Find out whether the provided 64 bit integer is a perfect square, i.e. a square of an integer. + + The number to very whether it's a perfect square. + True if and only if it is a perfect square. + + + + Raises 2 to the provided integer exponent (0 <= exponent < 31). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Raises 2 to the provided integer exponent (0 <= exponent < 63). + + The exponent to raise 2 up to. + 2 ^ exponent. + + + + + Evaluate the binary logarithm of an integer number. + + Two-step method using a De Bruijn-like sequence table lookup. + + + + Find the closest perfect power of two that is larger or equal to the provided + 32 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Find the closest perfect power of two that is larger or equal to the provided + 64 bit integer. + + The number of which to find the closest upper power of two. + A power of two. + + + + + Returns the greatest common divisor (gcd) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's + algorithm. + + List of Integers. 
+ Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of integers using Euclid's algorithm. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two integers using Euclid's algorithm. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of integers using Euclid's algorithm. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the greatest common divisor (gcd) of two big integers. + + First Integer: a. + Second Integer: b. + Greatest common divisor gcd(a,b) + + + + Returns the greatest common divisor (gcd) of a set of big integers. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Returns the greatest common divisor (gcd) of a set of big integers. + + List of Integers. + Greatest common divisor gcd(list of integers) + + + + Computes the extended greatest common divisor, such that a*x + b*y = gcd(a,b). + + First Integer: a. + Second Integer: b. + Resulting x, such that a*x + b*y = gcd(a,b). + Resulting y, such that a*x + b*y = gcd(a,b) + Greatest common divisor gcd(a,b) + + + long x,y,d; + d = Fn.GreatestCommonDivisor(45,18,out x, out y); + -> d == 9 && x == 1 && y == -2 + + The gcd of 45 and 18 is 9: 18 = 2*9, 45 = 5*9. 9 = 1*45 -2*18, therefore x=1 and y=-2. + + + + + Returns the least common multiple (lcm) of two big integers. + + First Integer: a. + Second Integer: b. + Least common multiple lcm(a,b) + + + + Returns the least common multiple (lcm) of a set of big integers. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Returns the least common multiple (lcm) of a set of big integers. + + List of Integers. + Least common multiple lcm(list of integers) + + + + Collection of functions equivalent to those provided by Microsoft Excel + but backed instead by Math.NET Numerics. + We do not recommend to use them except in an intermediate phase when + porting over solutions previously implemented in Excel. + + + + + An algorithm failed to converge. + + + + + An algorithm failed to converge due to a numerical breakdown. + + + + + An error occured calling native provider function. + + + + + An error occured calling native provider function. + + + + + Native provider was unable to allocate sufficent memory. + + + + + Native provider failed LU inversion do to a singular U matrix. + + + + + Compound Monthly Return or Geometric Return or Annualized Return + + + + + Average Gain or Gain Mean + This is a simple average (arithmetic mean) of the periods with a gain. It is calculated by summing the returns for gain periods (return 0) + and then dividing the total by the number of gain periods. 
+ + http://www.offshore-library.com/kb/statistics.php + + + + Average Loss or LossMean + This is a simple average (arithmetic mean) of the periods with a loss. It is calculated by summing the returns for loss periods (return < 0) + and then dividing the total by the number of loss periods. + + http://www.offshore-library.com/kb/statistics.php + + + + Calculation is similar to Standard Deviation , except it calculates an average (mean) return only for periods with a gain + and measures the variation of only the gain periods around the gain mean. Measures the volatility of upside performance. + © Copyright 1996, 1999 Gary L.Gastineau. First Edition. © 1992 Swiss Bank Corporation. + + + + + Similar to standard deviation, except this statistic calculates an average (mean) return for only the periods with a loss and then + measures the variation of only the losing periods around this loss mean. This statistic measures the volatility of downside performance. + + http://www.offshore-library.com/kb/statistics.php + + + + This measure is similar to the loss standard deviation except the downside deviation + considers only returns that fall below a defined minimum acceptable return (MAR) rather than the arithmetic mean. + For example, if the MAR is 7%, the downside deviation would measure the variation of each period that falls below + 7%. (The loss standard deviation, on the other hand, would take only losing periods, calculate an average return for + the losing periods, and then measure the variation between each losing return and the losing return average). + + + + + A measure of volatility in returns below the mean. It's similar to standard deviation, but it only + looks at periods where the investment return was less than average return. + + + + + Measures a fund’s average gain in a gain period divided by the fund’s average loss in a losing + period. Periods can be monthly or quarterly depending on the data frequency. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + + + + Find both complex roots of the quadratic equation c + b*x + a*x^2 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all roots of the Chebychev polynomial of the first kind. + + The polynomial order and therefore the number of roots. + The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*(2i-1)/(2n)) + + + + Find all roots of the Chebychev polynomial of the second kind. + + The polynomial order and therefore the number of roots. 
+ The real domain interval begin where to start sampling. + The real domain interval end where to stop sampling. + Samples in [a,b] at (b+a)/2+(b-1)/2*cos(pi*i/(n-1)) + + + + Least-Squares Curve Fitting Routines + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as [a, b] array, + where a is the intercept and b the slope. + + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning a function y' for the best fitting line. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning a function y' for the best fitting combination. + If an intercept is added, its coefficient will be prepended to the resulting parameters. + + + + + Weighted Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) and weights w to a linear surface y : X -> p0*x0 + p1*x1 + ... + pk*xk, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning a function y' for the best fitting polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Weighted Least-Squares fitting the points (x,y) and weights w to a k-order polynomial y : x -> p0 + p1*x + p2*x^2 + ... + pk*x^k, + returning its best fitting parameters as [p0, p1, p2, ..., pk] array, compatible with Evaluate.Polynomial. + A polynomial with order/degree k has (k+1) coefficients and thus requires at least (k+1) samples. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (x,y) to an arbitrary linear combination y : x -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. 
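A minimal sketch of the least-squares fitting routines described above, assuming the Fit and Evaluate helper classes of MathNet.Numerics (the sample data are made up):

    using MathNet.Numerics;

    double[] x = { 1, 2, 3, 4, 5 };
    double[] y = { 2.1, 3.9, 6.2, 7.8, 10.1 };

    // Best-fitting line y = a + b*x; Item1 is the intercept a, Item2 the slope b.
    var line = Fit.Line(x, y);
    double intercept = line.Item1, slope = line.Item2;

    // Second-order polynomial fit; coefficients come back ascending by exponent [p0, p1, p2],
    // compatible with Evaluate.Polynomial as noted above.
    double[] p = Fit.Polynomial(x, y, 2);
    double yAt3 = Evaluate.Polynomial(3.0, p);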
+ + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (X,y) = ((x0,x1,..,xk),y) to an arbitrary linear combination y : X -> p0*f0(x) + p1*f1(x) + ... + pk*fk(x), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning its best fitting parameters as [p0, p1, p2, ..., pk] array. + + + + + Least-Squares fitting the points (T,y) = (T,y) to an arbitrary linear combination y : X -> p0*f0(T) + p1*f1(T) + ... + pk*fk(T), + returning a function y' for the best fitting combination. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate samples by sampling a function at the provided points. + + + + + Generate a sample sequence by sampling a function at the provided point sequence. + + + + + Generate a linearly spaced sample vector of the given length between the specified values (inclusive). + Equivalent to MATLAB linspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at linearly spaced points between the specified values (inclusive). + + + + + Generate a base 10 logarithmically spaced sample vector of the given length between the specified decade exponents (inclusive). + Equivalent to MATLAB logspace but with the length as first instead of last argument. + + + + + Generate samples by sampling a function at base 10 logarithmically spaced points between the specified decade exponents (inclusive). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and step 1. + Equivalent to MATLAB colon operator (:). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provided step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). + + + + + Generate a linearly spaced sample vector within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + Equivalent to MATLAB double colon operator (::). 
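The linspace/logspace/colon-style generators described above, sketched against the Generate class of MathNet.Numerics (lengths, bounds and step are illustrative):

    using MathNet.Numerics;

    // Five equally spaced samples over [0, 1]: 0, 0.25, 0.5, 0.75, 1 (like MATLAB linspace).
    double[] lin = Generate.LinearSpaced(5, 0.0, 1.0);

    // Four decade-spaced samples 10^0 .. 10^3 (like MATLAB logspace).
    double[] log = Generate.LogSpaced(4, 0.0, 3.0);

    // Colon-style range with an explicit step: 0, 0.5, 1, 1.5, 2.
    double[] range = Generate.LinearRange(0.0, 0.5, 2.0);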
+ + + + + Generate samples by sampling a function at linearly spaced points within the inclusive interval (start, stop) and the provide step. + The start value is aways included as first value, but stop is only included if it stop-start is a multiple of step. + + + + + Create a periodic wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic wave. + + The number of samples to generate. + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite periodic wave sequence. + + The function to apply to each of the values and evaluate the resulting sample. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The length of the period when sampled at one sample per time unit. This is the interval of the periodic domain, a typical value is 1.0, or 2*Pi for angular functions. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a Sine wave. + + The number of samples to generate. + Samples per time unit (Hz). Must be larger than twice the frequency to satisfy the Nyquist criterion. + Frequency in periods per time unit (Hz). + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create an infinite Sine wave sequence. + + Samples per unit. + Frequency in samples per unit. + The maximal reached peak. + The mean, or DC part, of the signal. + Optional phase offset. + Optional delay, relative to the phase. + + + + Create a periodic square wave, starting with the high phase. + + The number of samples to generate. + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create an infinite periodic square wave sequence, starting with the high phase. + + Number of samples of the high phase. + Number of samples of the low phase. + Sample value to be emitted during the low phase. + Sample value to be emitted during the high phase. + Optional delay. + + + + Create a periodic triangle wave, starting with the raise phase from the lowest sample. + + The number of samples to generate. + Number of samples of the raise phase. 
+ Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic triangle wave sequence, starting with the raise phase from the lowest sample. + + Number of samples of the raise phase. + Number of samples of the fall phase. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create a periodic sawtooth wave, starting with the lowest sample. + + The number of samples to generate. + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an infinite periodic sawtooth wave sequence, starting with the lowest sample. + + Number of samples a full sawtooth period. + Lowest sample value. + Highest sample value. + Optional delay. + + + + Create an array with each field set to the same value. + + The number of samples to generate. + The value that each field should be set to. + + + + Create an infinite sequence where each element has the same value. + + The value that each element should be set to. + + + + Create a Heaviside Step sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. + + + + Create an infinite Heaviside Step sample sequence. + + The maximal reached peak. + Offset to the time axis. + + + + Create a Kronecker Delta impulse sample vector. + + The number of samples to generate. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + The maximal reached peak. + Offset to the time axis, hence the sample index of the impulse. + + + + Create a periodic Kronecker Delta impulse sample vector. + + The number of samples to generate. + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Create a Kronecker Delta impulse sample vector. + + impulse sequence period. + The maximal reached peak. + Offset to the time axis. Zero or positive. + + + + Generate samples generated by the given computation. + + + + + Generate an infinite sequence generated by the given computation. + + + + + Generate a Fibonacci sequence, including zero as first value. + + + + + Generate an infinite Fibonacci sequence, including zero as first value. + + + + + Create random samples, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create an infinite random sample sequence, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution, uniform between 0 and 1. + Faster than other methods but with reduced guarantees on randomness. + + + + + Create samples with independent amplitudes of standard distribution. + + + + + Create an infinite sample sequence with independent amplitudes of standard distribution. 
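A sketch of the periodic-signal and sequence generators documented above; the parameter order of Sinusoidal (length, sampling rate, frequency, amplitude) is assumed from the entries above and should be checked against the referenced MathNet.Numerics version:

    using MathNet.Numerics;

    // A 1 Hz sine sampled at 8 Hz with amplitude 2.0 (sampling rate > 2 * frequency, per the Nyquist note above).
    double[] sine = Generate.Sinusoidal(16, 8.0, 1.0, 2.0);

    // The first ten Fibonacci numbers, starting with zero: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34.
    double[] fib = Generate.Fibonacci(10);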
+ + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create samples with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create an infinite sample sequence with independent amplitudes of normal distribution and a flat spectral density. + + + + + Create skew alpha stable samples. + + The number of samples to generate. + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create skew alpha stable samples. + + Stability alpha-parameter of the stable distribution + Skewness beta-parameter of the stable distribution + Scale c-parameter of the stable distribution + Location mu-parameter of the stable distribution + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Create random samples. + + + + + Create an infinite random sample sequence. + + + + + Generate samples by sampling a function at samples from a probability distribution. + + + + + Generate a sample sequence by sampling a function at samples from a probability distribution. + + + + + Generate samples by sampling a function at sample pairs from a probability distribution. + + + + + Generate a sample sequence by sampling a function at sample pairs from a probability distribution. + + + + + Globalized String Handling Helpers + + + + + Tries to get a from the format provider, + returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format + provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Tries to get a from the format provider, returning the current culture if it fails. + + + An that supplies culture-specific + formatting information. + + A instance. + + + + Globalized Parsing: Tokenize a node by splitting it into several nodes. + + Node that contains the trimmed string to be tokenized. + List of keywords to tokenize by. + keywords to skip looking for (because they've already been handled). + + + + Globalized Parsing: Parse a double number + + First token of the number. + The parsed double number using the current culture information. + + + + + Globalized Parsing: Parse a float number + + First token of the number. + The parsed float number using the current culture information. + + + + + Calculates the R-Squared value, also known as coefficient of determination, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Squared Person product-momentum correlation coefficient. + + + + Calculates the R value, also known as linear correlation coefficient, + given modelled and observed values + + The values expected from the modelled + The actual data set values obtained + Person product-momentum correlation coefficient. 
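The goodness-of-fit measures above in a minimal sketch (GoodnessOfFit is assumed to be the MathNet.Numerics class these entries describe; the data are made up):

    using MathNet.Numerics;

    double[] observed  = { 1.0, 2.1, 2.9, 4.2 };
    double[] predicted = { 1.1, 2.0, 3.0, 4.0 };   // e.g. values produced by a fitted model

    double r2 = GoodnessOfFit.RSquared(predicted, observed); // coefficient of determination
    double r  = GoodnessOfFit.R(predicted, observed);        // Pearson product-moment correlation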
+ + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The Standard Error of the regression + + + + Calculates the Standard Error of the regression, given a sequence of + modeled/predicted values, and a sequence of actual/observed values + + The modelled/predicted values + The observed/actual values + The degrees of freedom by which the + number of samples is reduced for performing the Standard Error calculation + The Standard Error of the regression + + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + Complex Fast (FFT) Implementation of the Discrete Fourier Transform (DFT). + + + + + Sequences with length greater than Math.Sqrt(Int32.MaxValue) + 1 + will cause k*k in the Bluestein sequence to overflow (GH-286). + + + + + Generate the bluestein sequence for the provided problem size. + + Number of samples. + Bluestein sequence exp(I*Pi*k^2/N) + + + + Convolution with the bluestein sequence (Parallel Version). + + Sample Vector. + + + + Swap the real and imaginary parts of each sample. + + Sample Vector. + + + + Bluestein generic FFT for arbitrary sized sample vectors. + + Time-space sample vector. + Fourier series exponent sign. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the FFT is evaluated in place. + Imaginary part of the sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex forward Fast Fourier Transform (FFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to multiple dimensional sample data. + + Sample data, where the FFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to two dimensional sample data. + + Sample data, organized row by row, where the FFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the forward Fast Fourier Transform (FFT) to a two dimensional data in form of a matrix. 
+ + Sample matrix, where the FFT is evaluated in place + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Spectrum data, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + + Real part of the sample vector, where the iFFT is evaluated in place. + Imaginary part of the sample vector, where the iFFT is evaluated in place. + Fourier Transform Convention Options. + + + + Packed Real-Complex inverse Fast Fourier Transform (iFFT) to arbitrary-length sample vectors. + Since for real-valued time samples the complex spectrum is conjugate-even (symmetry), + the spectrum can be fully reconstructed form the positive frequencies only (first half). + The data array needs to be N+2 (if N is even) or N+1 (if N is odd) long in order to support such a packed spectrum. + + Data array of length N+2 (if N is even) or N+1 (if N is odd). + The number of samples. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to multiple dimensional sample data. + + Spectrum data, where the iFFT is evaluated in place. + + The data size per dimension. The first dimension is the major one. + For example, with two dimensions "rows" and "columns" the samples are assumed to be organized row by row. + + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to two dimensional sample data. + + Sample data, organized row by row, where the iFFT is evaluated in place + The number of rows. + The number of columns. + Data available organized column by column instead of row by row can be processed directly by swapping the rows and columns arguments. + Fourier Transform Convention Options. + + + + Applies the inverse Fast Fourier Transform (iFFT) to a two dimensional data in form of a matrix. + + Sample matrix, where the iFFT is evaluated in place + Fourier Transform Convention Options. + + + + Naive forward DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DFT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Fourier Transform Convention Options. + Corresponding time-space vector. + + + + Radix-2 forward FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Radix-2 inverse FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + + Bluestein forward FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Bluestein inverse FFT for arbitrary sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier Transform Convention Options. + + + + Extract the exponent sign to be used in forward transforms according to the + provided convention options. + + Fourier Transform Convention Options. + Fourier series exponent sign. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. 
+ Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Generate the frequencies corresponding to each index in frequency space. + The frequency space has a resolution of sampleRate/N. + Index 0 corresponds to the DC part, the following indices correspond to + the positive frequencies up to the Nyquist frequency (sampleRate/2), + followed by the negative frequencies wrapped around. + + Number of samples. + The sampling rate of the time-space data. + + + + Naive generic DFT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Fourier series exponent sign. + Corresponding frequency-space vector. + + + + Radix-2 Reorder Helper Method + + Sample type + Sample vector + + + + Radix-2 Step Helper Method + + Sample vector. + Fourier series exponent sign. + Level Group Size. + Index inside of the level. + + + + Radix-2 generic FFT for power-of-two sized sample vectors. + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Radix-2 generic FFT for power-of-two sample vectors (Parallel Version). + + Sample vector, where the FFT is evaluated in place. + Fourier series exponent sign. + + + + + Fourier Transform Convention + + + + + Inverse integrand exponent (forward: positive sign; inverse: negative sign). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling and common exponent (used in Maple). + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction (used in Matlab). [= AsymmetricScaling] + + + + + Inverse integrand exponent; No scaling at all (used in all Numerical Recipes based implementations). [= InverseExponent | NoScaling] + + + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + Fast (FHT) Implementation of the Discrete Hartley Transform (DHT). + + + + + Naive forward DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Hartley Transform Convention Options. + Corresponding frequency-space vector. + + + + Naive inverse DHT, useful e.g. to verify faster algorithms. + + Frequency-space sample vector. + Hartley Transform Convention Options. + Corresponding time-space vector. + + + + Rescale FFT-the resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Rescale the iFFT-resulting vector according to the provided convention options. + + Fourier Transform Convention Options. + Sample Vector. + + + + Naive generic DHT, useful e.g. to verify faster algorithms. + + Time-space sample vector. + Corresponding frequency-space vector. + + + + Hartley Transform Convention + + + + + Only scale by 1/N in the inverse direction; No scaling in forward direction. + + + + + Don't scale at all (neither on forward nor on inverse transformation). + + + + + Universal; Symmetric scaling. + + + + + Numerical Integration (Quadrature). + + + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. 
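Returning to the Fourier routines documented above (before the Hartley and integration entries), a minimal in-place forward/inverse round trip; FourierOptions.Default is assumed to be the conventional default option:

    using System.Numerics;
    using MathNet.Numerics.IntegralTransforms;

    // Small complex sample vector; Forward/Inverse transform the data in place.
    var samples = new Complex[] { 1, 2, 3, 4 };
    Fourier.Forward(samples, FourierOptions.Default);   // time domain -> frequency domain
    Fourier.Inverse(samples, FourierOptions.Default);   // back to (approximately) 1, 2, 3, 4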
+ + + + Approximation of the definite integral of an analytic smooth function on a closed interval. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Approximation of the finite integral in the given interval. + + + + Analytic integration algorithm for smooth functions with no discontinuities + or derivative discontinuities and no poles inside the interval. + + + + + Maximum number of iterations, until the asked + maximum error is (likely to be) satisfied. + + + + + Approximate the integral by the double exponential transformation + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Compute the abscissa vector for a single level. + + The level to evaluate the abscissa vector for. + Abscissa Vector. + + + + Compute the weight vector for a single level. + + The level to evaluate the weight vector for. + Weight Vector. + + + + Precomputed abscissa vector per level. + + + + + Precomputed weight vector per level. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + + Initializes a new instance of the class. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + + + + Gettter for the ith abscissa. + + Index of the ith abscissa. + The ith abscissa. + + + + Getter that returns a clone of the array containing the abscissas. + + + + + Getter for the ith weight. 
+ + Index of the ith weight. + The ith weight. + + + + Getter that returns a clone of the array containing the weights. + + + + + Getter for the order. + + + + + Getter for the InvervalBegin. + + + + + Getter for the InvervalEnd. + + + + + Approximates a definite integral using an Nth order Gauss-Legendre rule. + + The analytic smooth function to integrate. + Where the interval starts, exclusive and finite. + Where the interval ends, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Approximates a 2-dimensional definite integral using an Nth order Gauss-Legendre rule over the rectangle [a,b] x [c,d]. + + The 2-dimensional analytic smooth function to integrate. + Where the interval starts for the first (inside) integral, exclusive and finite. + Where the interval ends for the first (inside) integral, exclusive and finite. + Where the interval starts for the second (outside) integral, exclusive and finite. + /// Where the interval ends for the second (outside) integral, exclusive and finite. + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Approximation of the finite integral in the given interval. + + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + Contains a method to compute the Gauss-Legendre abscissas/weights and precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Precomputed abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024. + + + + + Computes the Gauss-Legendre abscissas/weights. + See Pavel Holoborodko for a description of the algorithm. + + Defines an Nth order Gauss-Legendre rule. The order also defines the number of abscissas and weights for the rule. + Required precision to compute the abscissas/weights. 1e-10 is usually fine. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Creates and maps a Gauss-Legendre point. + + + + + Getter for the GaussPoint. + + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + + + + Getter for the GaussPoint. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Defines an Nth order Gauss-Legendre rule. Precomputed Gauss-Legendre abscissas/weights for orders 2-20, 32, 64, 96, 100, 128, 256, 512, 1024 are used, otherwise they're calulcated on the fly. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. 
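A sketch of the Gauss-Legendre quadrature described above, using a 32nd-order rule (the GaussLegendreRule class name is as in MathNet.Numerics; the integrands are illustrative):

    using System;
    using MathNet.Numerics.Integration;

    // Integral of x*sin(x) over [0, pi]; the exact value is pi.
    double oneDim = GaussLegendreRule.Integrate(x => x * Math.Sin(x), 0.0, Math.PI, 32);

    // Double integral of x*y over the rectangle [0,1] x [0,2]; the exact value is 1.
    double twoDim = GaussLegendreRule.Integrate((x, y) => x * y, 0.0, 1.0, 0.0, 2.0, 32);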
+ + + + Maps the non-negative abscissas/weights from the interval [-1, 1] to the interval [intervalBegin, intervalEnd]. + + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Object containing the non-negative abscissas/weights, order, and intervalBegin/intervalEnd. The non-negative abscissas/weights are generated over the interval [-1,1] for the given order. + Object containing the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + Contains the abscissas/weights, order, and intervalBegin/intervalEnd. + + + + + Approximation algorithm for definite integrals by the Trapezium rule of the Newton-Cotes family. + + + Wikipedia - Trapezium Rule + + + + + Direct 2-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral in the provided interval by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + The expected accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Adaptive approximation of the definite integral by the trapezium rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Abscissa vector per level provider. + Weight vector per level provider. + First Level Step + The expected relative accuracy of the approximation. + Approximation of the finite integral in the given interval. + + + + Approximation algorithm for definite integrals by Simpson's rule. + + + + + Direct 3-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Approximation of the finite integral in the given interval. + + + + Composite N-point approximation of the definite integral in the provided interval by Simpson's rule. + + The analytic smooth function to integrate. + Where the interval starts, inclusive and finite. + Where the interval stops, inclusive and finite. + Even number of composite subdivision partitions. + Approximation of the finite integral in the given interval. + + + + Interpolation Factory. + + + + + Creates an interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. 
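Relating to the Newton-Cotes trapezium and Simpson rules documented above: the composite variants are the ones usually called directly. A short sketch, assuming the conventional MathNet.Numerics.Integration class names NewtonCotesTrapeziumRule and SimpsonRule (treat both names and overloads as assumptions if this package version differs).

```csharp
using System;
using MathNet.Numerics.Integration;

class NewtonCotesSketch
{
    static void Main()
    {
        // Integrand whose definite integral on [0, 1] is atan(1) = pi/4.
        Func<double, double> f = x => 1.0 / (1.0 + x * x);

        // Composite trapezium rule over 1000 equally sized partitions.
        double trapezium = NewtonCotesTrapeziumRule.IntegrateComposite(f, 0.0, 1.0, 1000);

        // Composite Simpson rule; the partition count must be even.
        double simpson = SimpsonRule.IntegrateComposite(f, 0.0, 1.0, 1000);

        Console.WriteLine("{0} {1} {2}", trapezium, simpson, Math.PI / 4.0);
    }
}
```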
+ + + + + Create a floater hormann rational pole-free interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolateRationalFloaterHormannSorted + instead, which is more efficient. + + + + + Create a Bulirsch Stoer rational interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.BulirschStoerRationalInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a barycentric polynomial interpolation where the given sample points are equidistant. + + The sample points t, must be equidistant. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.Barycentric.InterpolatePolynomialEquidistantSorted + instead, which is more efficient. + + + + + Create a Neville polynomial interpolation based on arbitrary points. + If the points happen to be equidistant, consider to use the much more robust PolynomialEquidistant instead. + Otherwise, consider whether RationalWithoutPoles would not be a more robust alternative. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.NevillePolynomialInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Create a piecewise linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LinearSpline.InterpolateSorted + instead, which is more efficient. + + + + + Create piecewise log-linear interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.LogLinear.InterpolateSorted + instead, which is more efficient. + + + + + Create an piecewise natural cubic spline interpolation based on arbitrary points, + with zero secondary derivatives at the boundaries. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. 
+ + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted + instead, which is more efficient. + + + + + Create an piecewise cubic Akima spline interpolation based on arbitrary points. + Akima splines are robust to outliers. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateAkimaSorted + instead, which is more efficient. + + + + + Create a piecewise cubic Hermite spline interpolation based on arbitrary points + and their slopes/first derivative. + + The sample points t. + The sample point values x(t). + The slope at the sample points. Optimized for arrays. + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.CubicSpline.InterpolateHermiteSorted + instead, which is more efficient. + + + + + Create a step-interpolation based on arbitrary points. + + The sample points t. + The sample point values x(t). + + An interpolation scheme optimized for the given sample points and values, + which can then be used to compute interpolations and extrapolations + on arbitrary points. + + + if your data is already sorted in arrays, consider to use + MathNet.Numerics.Interpolation.StepInterpolation.InterpolateSorted + instead, which is more efficient. + + + + + Barycentric Interpolation Algorithm. + + Supports neither differentiation nor integration. + + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + Barycentric weights (N), sorted ascendingly by x. + + + + Create a barycentric polynomial interpolation from a set of (x,y) value pairs with equidistant x, sorted ascendingly by x. + + + + + Create a barycentric polynomial interpolation from an unordered set of (x,y) value pairs with equidistant x. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a barycentric polynomial interpolation from an unsorted set of (x,y) value pairs with equidistant x. + + + + + Create a barycentric polynomial interpolation from a set of values related to linearly/equidistant spaced points within an interval. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). 
+ + Order of the interpolation scheme, 0 <= order <= N. + In most cases a value between 3 and 8 gives good results. + + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + The values are assumed to be sorted ascendingly by x. + + Sample points (N), sorted ascendingly. + Sample values (N), sorted ascendingly by x. + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + WARNING: Works in-place and can thus causes the data array to be reordered. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Create a barycentric rational interpolation without poles, using Mike Floater and Kai Hormann's Algorithm. + + Sample points (N), no sorting assumed. + Sample values (N). + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Rational Interpolation (with poles) using Roland Bulirsch and Josef Stoer's Algorithm. + + + + This algorithm supports neither differentiation nor integration. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Bulirsch-Stoer rational interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Bulirsch-Stoer rational interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Cubic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + third order spline coefficients (N) + + + + Create a hermite cubic spline interpolation from a set of (x,y) value pairs and their slope (first derivative), sorted ascendingly by x. 
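The sorted factory methods referenced in the entries above (for example MathNet.Numerics.Interpolation.CubicSpline.InterpolateNaturalSorted and Barycentric.InterpolateRationalFloaterHormannSorted) return an interpolation object that is then evaluated pointwise. A sketch of that pattern; the evaluation members (Interpolate, Differentiate, Integrate) are the ones described above, with their exact signatures assumed.

```csharp
using System;
using MathNet.Numerics.Interpolation;

class InterpolationSketch
{
    static void Main()
    {
        // Sample points t and values x(t), already sorted ascendingly by t.
        double[] t = { 0.0, 1.0, 2.0, 3.0, 4.0 };
        double[] x = { 0.0, 0.8, 0.9, 0.1, -0.8 };

        // Natural cubic spline: supports interpolation, differentiation and integration.
        IInterpolation spline = CubicSpline.InterpolateNaturalSorted(t, x);
        double y = spline.Interpolate(2.4);
        double slope = spline.Differentiate(2.4);
        double area = spline.Integrate(0.0, 4.0);

        // Pole-free rational (Floater-Hormann) interpolation: evaluation only.
        IInterpolation rational = Barycentric.InterpolateRationalFloaterHormannSorted(t, x);
        double yr = rational.Interpolate(2.4);

        Console.WriteLine("{0} {1} {2} {3}", y, slope, area, yr);
    }
}
```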
+ + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a hermite cubic spline interpolation from an unsorted set of (x,y) value pairs and their slope (first derivative). + + + + + Create an Akima cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + Akima splines are robust to outliers. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create an Akima cubic spline interpolation from an unsorted set of (x,y) value pairs. + Akima splines are robust to outliers. + + + + + Create a cubic spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x, + and custom boundary/termination conditions. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a cubic spline interpolation from an unsorted set of (x,y) value pairs and custom boundary/termination conditions. + + + + + Create a natural cubic spline interpolation from a set of (x,y) value pairs + and zero second derivatives at the two boundaries, sorted ascendingly by x. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a natural cubic spline interpolation from an unsorted set of (x,y) value pairs + and zero second derivatives at the two boundaries. + + + + + Three-Point Differentiation Helper. + + Sample Points t. + Sample Values x(t). + Index of the point of the differentiation. + Index of the first sample. + Index of the second sample. + Index of the third sample. + The derivative approximation. + + + + Tridiagonal Solve Helper. + + The a-vector[n]. + The b-vector[n], will be modified by this function. + The c-vector[n]. + The d-vector[n], will be modified by this function. + The x-vector[n] + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Interpolation within the range of a discrete set of known data points. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. 
+ + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Piece-wise Linear Interpolation. + + Supports both differentiation and integration. + + + Sample points (N+1), sorted ascending + Sample values (N or N+1) at the corresponding points; intercept, zero order coefficients + Slopes (N) at the sample points (first order coefficients): N + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Piece-wise Log-Linear Interpolation + + This algorithm supports differentiation, not integration. + + + + Internal Spline Interpolation + + + + Sample points (N), sorted ascending + Natural logarithm of the sample values (N) at the corresponding points + + + + Create a piecewise log-linear interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a piecewise log-linear interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Lagrange Polynomial Interpolation using Neville's Algorithm. 
+ + + + This algorithm supports differentiation, but doesn't support integration. + + + When working with equidistant or Chebyshev sample points it is + recommended to use the barycentric algorithms specialized for + these cases instead of this arbitrary Neville algorithm. + + + + + Sample Points t, sorted ascendingly. + Sample Values x(t), sorted ascendingly by x. + + + + Create a Neville polynomial interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a Neville polynomial interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Quadratic Spline Interpolation. + + Supports both differentiation and integration. + + + sample points (N+1), sorted ascending + Zero order spline coefficients (N) + First order spline coefficients (N) + second order spline coefficients (N) + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t, + or the left index of the closest segment for extrapolation. + + + + + Left and right boundary conditions. + + + + + Natural Boundary (Zero second derivative). + + + + + Parabolically Terminated boundary. + + + + + Fixed first derivative at the boundary. + + + + + Fixed second derivative at the boundary. + + + + + A step function where the start of each segment is included, and the last segment is open-ended. + Segment i is [x_i, x_i+1) for i < N, or [x_i, infinity] for i = N. + The domain of the function is all real numbers, such that y = 0 where x <. + + Supports both differentiation and integration. + + + Sample points (N), sorted ascending + Samples values (N) of each segment starting at the corresponding sample point. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. 
+ WARNING: Works in-place and can thus causes the data array to be reordered. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. + + Point t to integrate at. + + + + Definite integral between points a and b. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + Find the index of the greatest sample point smaller than t. + + + + + Wraps an interpolation with a transformation of the interpolated values. + + Neither differentiation nor integration is supported. + + + + Create a linear spline interpolation from a set of (x,y) value pairs, sorted ascendingly by x. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + WARNING: Works in-place and can thus causes the data array to be reordered and modified. + + + + + Create a linear spline interpolation from an unsorted set of (x,y) value pairs. + + + + + Gets a value indicating whether the algorithm supports differentiation (interpolated derivative). + + + + + Gets a value indicating whether the algorithm supports integration (interpolated quadrature). + + + + + Interpolate at point t. + + Point t to interpolate at. + Interpolated value x(t). + + + + Differentiate at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated first derivative at point t. + + + + Differentiate twice at point t. NOT SUPPORTED. + + Point t to interpolate at. + Interpolated second derivative at point t. + + + + Indefinite integral at point t. NOT SUPPORTED. + + Point t to integrate at. + + + + Definite integral between points a and b. NOT SUPPORTED. + + Left bound of the integration interval [a,b]. + Right bound of the integration interval [a,b]. + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
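The dense matrix constructors above distinguish copying from binding directly to raw column-major storage. A small sketch of the common creation paths; DenseMatrix.OfArray and the generic builder are standard MathNet.Numerics entry points, while the raw-array constructor signature is inferred from the description above and should be treated as an assumption.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixCreationSketch
{
    static void Main()
    {
        // Independent copy of a 2-D array.
        var copied = DenseMatrix.OfArray(new[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });

        // Zero-initialised 3x2 matrix via the generic builder.
        Matrix<double> zeros = Matrix<double>.Build.Dense(3, 2);

        // Column-major raw array bound without copying: columns are (1,2) and (3,4).
        var bound = new DenseMatrix(2, 2, new[] { 1.0, 2.0, 3.0, 4.0 });

        Console.WriteLine(copied);
        Console.WriteLine(zeros);
        Console.WriteLine(bound);
    }
}
```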
+ + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. 
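The arithmetic members above are normally reached through the overloaded operators, and the norm entries map to instance methods. A sketch under those assumptions (L1Norm, InfinityNorm and FrobeniusNorm are the method names implied by the summaries above).

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixOpsSketch
{
    static void Main()
    {
        var m = DenseMatrix.OfArray(new[,] { { 4.0, 1.0 }, { 1.0, 3.0 } });
        var v = DenseVector.OfArray(new[] { 1.0, 2.0 });

        var product = m * v;                  // matrix-vector multiplication
        var scaled = 2.0 * m;                 // scalar multiplication
        var symmetrised = m + m.Transpose();  // matrix addition

        // Induced and entry-wise norms described above.
        double l1 = m.L1Norm();
        double linf = m.InfinityNorm();
        double frobenius = m.FrobeniusNorm();

        Console.WriteLine("{0}\n{1}\n{2}\n{3} {4} {5}",
            product, scaled, symmetrised, l1, linf, frobenius);
    }
}
```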
+ + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. 
+ The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. 
+ + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The divisor to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use, + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. 
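A companion sketch for the dense vector members above (construction, dot product, operators, norms and extrema indices). The names follow the summaries above; the exact signatures are assumed rather than checked against this package version.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseVectorSketch
{
    static void Main()
    {
        var u = DenseVector.OfArray(new[] { 1.0, -2.0, 3.0 });
        var v = DenseVector.OfArray(new[] { 4.0, 5.0, -6.0 });

        double dot = u.DotProduct(v);     // sum of u[i] * v[i]
        var combined = u + 2.0 * v;       // operators allocate a new result vector

        double manhattan = u.L1Norm();    // sum of absolute values
        double euclidean = u.L2Norm();    // square root of the sum of squares
        int largest = u.AbsoluteMaximumIndex();

        Console.WriteLine("{0} {1} {2} {3} {4}",
            dot, combined, manhattan, euclidean, largest);
    }
}
```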
+ + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a double dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. 
+ The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. 
+ + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. 
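Because a diagonal matrix stores only its diagonal, the determinant, inverse and diagonal accessor documented above are all cheap. A sketch assuming the raw-diagonal-array constructor described above (row count, column count, diagonal elements).

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DiagonalMatrixSketch
{
    static void Main()
    {
        // 3x3 diagonal matrix bound directly to its diagonal elements 1, 2, 4.
        var d = new DiagonalMatrix(3, 3, new[] { 1.0, 2.0, 4.0 });

        double determinant = d.Determinant();   // product of the diagonal: 8
        var inverse = d.Inverse();              // diagonal 1, 0.5, 0.25
        var diagonal = d.Diagonal();            // the diagonal returned as a vector

        Console.WriteLine("{0}\n{1}\n{2}", determinant, inverse, diagonal);
    }
}
```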
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. 
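The factorization classes documented here (Cholesky and the eigenvalue decomposition above, with LU, QR and SVD following below) are normally obtained from a matrix instance and then used to solve linear systems. A hedged sketch: the instance methods Cholesky() and Evd() and the Solve/Determinant/EigenValues members are the usual MathNet.Numerics surface, assumed rather than verified against this exact version.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class FactorizationSketch
{
    static void Main()
    {
        // Symmetric positive definite system A x = b.
        var a = DenseMatrix.OfArray(new[,] { { 4.0, 1.0 }, { 1.0, 3.0 } });
        var b = DenseVector.OfArray(new[] { 1.0, 2.0 });

        // Cholesky: A = L*L', computed when the factorization object is created.
        var cholesky = a.Cholesky();
        Vector<double> x = cholesky.Solve(b);
        double determinant = cholesky.Determinant;

        // Eigenvalue decomposition: A = V*D*V' in the symmetric case.
        var evd = a.Evd();
        var eigenValues = evd.EigenValues;

        Console.WriteLine("{0}\n{1}\n{2}", x, determinant, eigenValues);
    }
}
```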
+ + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + Matrix V is encoded in the property EigenVectors in the way that: + - column corresponding to real eigenvalue represents real eigenvector, + - columns corresponding to the pair of complex conjugate eigenvalues + lambda[i] and lambda[i+1] encode real and imaginary parts of eigenvectors. + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. 
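The Cholesky, LU, QR, SVD and EVD classes described in this region are normally obtained from a Matrix<double> rather than constructed directly. A hedged sketch of the usual factor-then-solve pattern follows; the matrix and right-hand side are made up, and Cholesky additionally requires a symmetric positive definite matrix, as the remarks above state:

using MathNet.Numerics.LinearAlgebra;

class FactorizationSketch
{
    static void Demo()
    {
        // Symmetric positive definite so that the Cholesky factorization succeeds.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4.0, 1.0, 1.0 },
            { 1.0, 3.0, 0.0 },
            { 1.0, 0.0, 2.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        // Each call factors A once (the work happens in the factorization's
        // constructor, as documented above) and caches the result.
        var x1 = a.Cholesky().Solve(b); // A = L*L', symmetric positive definite only
        var x2 = a.LU().Solve(b);       // P*A = L*U with partial pivoting
        var x3 = a.QR().Solve(b);       // A = Q*R, also usable for least squares
        var x4 = a.Svd().Solve(b);      // A = U*S*VT, exposes singular values and rank
        var evd = a.Evd();              // eigenvalues and eigenvectors: A*V = V*D
    }
}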
+ + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. 
+ Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. 
+ + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. 
Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + double version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. 
+ + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. 
+ Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ URL: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
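The example itself is not present in this dump, so a hedged replacement sketch follows. It uses the Math.NET Numerics 3.x types BiCgStab, Iterator<double>, the two common stop criteria, and the DiagonalPreconditioner documented further below; all matrix and vector values are arbitrary:

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class BiCgStabSketch
{
    static void Run()
    {
        // Small diagonally dominant system; in practice the solver targets large sparse matrices.
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 5.0, 1.0, 0.0 },
            { 1.0, 6.0, 2.0 },
            { 0.0, 2.0, 7.0 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
        var x = Vector<double>.Build.Dense(3);

        // The iterator decides when to stop: after at most 1000 iterations,
        // or once the residual norm falls below 1e-10.
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        var solver = new BiCgStab();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        // x now holds the approximate solution of A*x = b.
    }
}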
+
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
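As above, the referenced example is not present in this dump. A sketch under the assumption that the solver class is GpBiCg (as in Math.NET Numerics 3.x) with the same Solve signature as BiCgStab, and that the switch-over counts documented just below are exposed through the two properties named in the code (the property names are assumptions):

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class GpBiCgSketch
{
    static Vector<double> SolveWith(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        // Assumed property names for the BiCgStab/GPBiCG switch-over counters
        // documented just below; adjust if the actual API differs.
        var solver = new GpBiCg
        {
            NumberOfBiCgStabSteps = 2,
            NumberOfGpBiCgSteps = 4
        };
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}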
+
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
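A hedged sketch of configuring the incomplete-LU preconditioners documented in this region and handing one to an iterative solver. FillLevel, DropTolerance and PivotTolerance are the properties named above; the class names ILU0Preconditioner and ILUTPPreconditioner are assumptions based on Math.NET Numerics 3.x:

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class IncompleteLuSketch
{
    static Vector<double> SolveWith(Matrix<double> a, Vector<double> b)
    {
        // ILU(0): keeps the sparsity pattern of A; Initialize builds the combined L/U factor.
        // (Class name assumed, see the lead-in above.)
        var ilu0 = new ILU0Preconditioner();
        ilu0.Initialize(a);

        // ILUTP: drop tolerance and pivoting, configured through the documented properties.
        var ilutp = new ILUTPPreconditioner
        {
            FillLevel = 10.0,      // allowed fill as a fraction of A's non-zeros
            DropTolerance = 1e-4,  // entries below this absolute value are dropped
            PivotTolerance = 1.0   // 0.0 would disable pivoting entirely
        };

        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        // The solver initializes the preconditioner with A before iterating;
        // ilu0 could be passed here in place of ilutp.
        new BiCgStab().Solve(a, b, x, iterator, ilutp);
        return x;
    }
}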
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
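Again, the referenced example is missing here. A hedged sketch, assuming the Math.NET Numerics class name MlkBiCgStab and a NumberOfStartingVectors property corresponding to the member documented just below (both names are assumptions):

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class MlkBiCgStabSketch
{
    static Vector<double> SolveWith(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10));

        // More starting vectors widen the Krylov basis at the cost of memory;
        // the property name is an assumption, see the member documented below.
        var solver = new MlkBiCgStab { NumberOfStartingVectors = 4 };
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}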
+
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
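The referenced example is missing here as well. Assuming the class is named TFQMR and implements the same IIterativeSolver<double> Solve signature used above, a minimal sketch:

using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;
using MathNet.Numerics.LinearAlgebra.Solvers;

class TfqmrSketch
{
    static Vector<double> SolveWith(Matrix<double> a, Vector<double> b)
    {
        var x = Vector<double>.Build.Dense(b.Count);
        var iterator = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-8));

        // TFQMR only needs products with A (no products with A-transpose),
        // which is what "transpose free" refers to in the summary above.
        var solver = new TFQMR();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());
        return x;
    }
}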
+
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
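A hedged sketch of building the CSR-backed sparse matrix described above, both through the generic Build catalog and through the concrete SparseMatrix class; indices and values are arbitrary:

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class SparseMatrixSketch
{
    static void Demo()
    {
        // (row, column, value) triples; every cell not listed stays zero.
        var entries = new[]
        {
            Tuple.Create(0, 0, 4.0),
            Tuple.Create(1, 1, 3.0),
            Tuple.Create(2, 0, 1.0),
            Tuple.Create(2, 2, 2.0)
        };

        // Generic builder: returns a Matrix<double> backed by sparse CSR storage.
        Matrix<double> s1 = Matrix<double>.Build.SparseOfIndexed(3, 3, entries);

        // Concrete class: exposes the NonZerosCount property documented above.
        SparseMatrix s2 = SparseMatrix.OfIndexed(3, 3, entries);
        Console.WriteLine(s2.NonZerosCount); // 4

        // Sparse matrices mix freely with dense vectors in products and solves.
        var v = Vector<double>.Build.Dense(3, 1.0);
        var product = s1 * v;
    }
}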
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
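The warning above about adding a non-zero scalar to a sparse vector is worth seeing concretely. A small sketch, assuming the 3.x SparseVector type and its NonZerosCount property; the cast assumes the result of Add on a sparse operand is itself a SparseVector:

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    var v = new SparseVector(100000);          // length 100000, one stored entry
    v[7] = 1.0;
    Console.WriteLine(v.NonZerosCount);        // 1

    var shifted = (SparseVector)v.Add(0.5);    // every element becomes 0.5 or 1.5
    Console.WriteLine(shifted.NonZerosCount);  // 100000 -- fully populated, so a DenseVector would be the better fit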
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a double. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + double version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
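A short sketch of the sparse-vector arithmetic and reduction members above, again assuming the 3.x API for the member names; the values are illustrative:

    using MathNet.Numerics.LinearAlgebra.Double;

    var v = new SparseVector(1000);
    v[10] = 3.0;
    v[500] = -4.0;

    var w = new SparseVector(1000);
    w[10] = 2.0;
    w[999] = 1.0;

    double dot  = v.DotProduct(w);          // 6: only index 10 overlaps
    double dot2 = v * w;                    // the * operator between two vectors is the same dot product
    double l1   = v.L1Norm();               // 7: sum of absolute values
    int iAbsMax = v.AbsoluteMaximumIndex(); // 500
    var sum     = v + w;                    // element-wise addition, result stays sparse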
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
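The pointwise members above map one-to-one onto method calls; a quick sketch with illustrative values (method names assumed from the 3.x API):

    using MathNet.Numerics.LinearAlgebra;

    var a = Vector<double>.Build.DenseOfArray(new[] { 1.0, -2.0, 4.0 });
    var b = Vector<double>.Build.DenseOfArray(new[] { 2.0, 2.0, 8.0 });

    var product  = a.PointwiseMultiply(b);   // { 2, -4, 32 }
    var quotient = a.PointwiseDivide(b);     // { 0.5, -1, 0.5 }
    var cubed    = a.PointwisePower(3.0);    // { 1, -8, 64 }

    double dot  = a.DotProduct(b);           // 30
    int iAbsMin = a.AbsoluteMinimumIndex();  // 0: |1| is the smallest magnitude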
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
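A sketch of a few of the dense-matrix creation routines listed above. The builder method names (DenseOfColumnArrays, DenseOfDiagonalArray, Dense with an init function) are assumed from the 3.x builder API; the raw-binding constructor is the one documented above:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    var fromArray = DenseMatrix.OfArray(new[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });

    // Column-major raw binding: the array and the matrix share storage.
    var raw   = new[] { 1.0, 3.0, 2.0, 4.0 };
    var bound = new DenseMatrix(2, 2, raw);          // same values as fromArray

    var fromColumns = Matrix<double>.Build.DenseOfColumnArrays(
        new[] { 1.0, 3.0 },
        new[] { 2.0, 4.0 });

    var diagonal = Matrix<double>.Build.DenseOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });
    var identity = Matrix<double>.Build.Dense(3, 3, (i, j) => i == j ? 1.0 : 0.0);  // via an init function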
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. 
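The multiply variants above differ only in which operand is transposed; a quick sketch (method names assumed from the 3.x API):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,] { { 1.0, 2.0 }, { 3.0, 4.0 } });
    var B = Matrix<double>.Build.DenseOfArray(new[,] { { 0.0, 1.0 }, { 1.0, 0.0 } });
    var x = Vector<double>.Build.DenseOfArray(new[] { 1.0, -1.0 });

    var AB  = A.Multiply(B);                  // A * B
    var ABt = A.TransposeAndMultiply(B);      // A * B'
    var AtB = A.TransposeThisAndMultiply(B);  // A' * B
    var Ax  = A.Multiply(x);                  // matrix-vector product, also written A * x
    var Atx = A.TransposeThisAndMultiply(x);  // A' * x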
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. 
+ All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
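The difference between binding and copying in the constructors above matters in practice; a small sketch under the same API assumptions:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double;

    var data = new[] { 1.0, 2.0, 3.0 };

    var bound  = new DenseVector(data);                    // binds to the array: shared storage
    var copied = Vector<double>.Build.DenseOfArray(data);  // independent copy

    data[0] = 99.0;
    Console.WriteLine(bound[0]);   // 99 -- changes to the array show through
    Console.WriteLine(copied[0]);  // 1  -- the copy is unaffected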
+ + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a float dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. 
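The operator overloads and norms above mirror the named methods; for example (operators and norm methods assumed from the 3.x API):

    using MathNet.Numerics.LinearAlgebra;

    var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 2.0 });
    var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, 0.0, -1.0 });

    var sum    = u + v;        // Add
    var diff   = u - v;        // Subtract
    var scaled = 2.0 * u;      // scalar multiply (either operand order)
    var halved = u / 2.0;      // scalar divide
    double dot = u * v;        // the * operator on two vectors is the dot product: 1

    double l1  = u.L1Norm();        // 5
    double l2  = u.L2Norm();        // 3
    double inf = u.InfinityNorm();  // 2
    double p3  = u.Norm(3.0);       // general p-norm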
+ + + + + Converts the string representation of a real dense vector to float-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. 
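A sketch of the diagonal-matrix behaviour described above, including the restriction on off-diagonal writes. The constructor is the raw-binding one documented above; the exact exception type is not spelled out there, so the catch is kept general:

    using System;
    using MathNet.Numerics.LinearAlgebra.Double;

    // Binds directly to the diagonal array.
    var D = new DiagonalMatrix(3, 3, new[] { 1.0, 2.0, 3.0 });

    D[1, 1] = 5.0;     // fine: a diagonal entry
    D[0, 1] = 0.0;     // also fine: writing zero off the diagonal changes nothing

    try
    {
        D[0, 1] = 7.0; // a non-zero off-diagonal write is rejected
    }
    catch (Exception ex)
    {
        Console.WriteLine("off-diagonal write refused: " + ex.GetType().Name);
    }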
+ + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. 
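Several of the members above are cheap for diagonal storage; a quick sketch (member names assumed from the 3.x API):

    using MathNet.Numerics.LinearAlgebra.Double;

    var D = new DiagonalMatrix(3, 3, new[] { 2.0, 4.0, 5.0 });

    double det  = D.Determinant();  // 40: the product of the diagonal
    var inverse = D.Inverse();      // diagonal 0.5, 0.25, 0.2
    var diag    = D.Diagonal();     // the diagonal as a vector
    double l1   = D.L1Norm();       // 5: maximum absolute column sum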
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. 
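A minimal use of the Cholesky factorization described above (the matrix must be symmetric positive definite, as the remarks say; method and property names assumed from the 3.x API):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 4.0, 2.0 },
        { 2.0, 3.0 }
    });
    var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });

    var cholesky = A.Cholesky();       // throws if A is not symmetric positive definite
    var L        = cholesky.Factor;    // lower-triangular L with A = L * L'
    var x        = cholesky.Solve(b);  // solves A x = b using the cached factorization
    double det   = cholesky.Determinant;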
+ + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. 
+ + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. 
+ If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
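The factorization classes above all expose the same Solve shape; a sketch comparing them on one square system (the LU, QR and Svd entry points and the Rank and ConditionNumber properties are assumed from the 3.x API):

    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 3.0, 1.0, 0.0 },
        { 1.0, 2.0, 1.0 },
        { 0.0, 1.0, 4.0 }
    });
    var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 0.0, 2.0 });

    var xLu  = A.LU().Solve(b);    // pivoted LU, the usual choice for square systems
    var xQr  = A.QR().Solve(b);    // Householder QR, also handles tall least-squares systems
    var xSvd = A.Svd().Solve(b);   // SVD, most robust for ill-conditioned or rank-deficient systems

    var svd = A.Svd();
    int rank    = svd.Rank;             // effective numerical rank
    double cond = svd.ConditionNumber;  // max(S) / min(S)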
+ In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. 
+ + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Symmetric Householder reduction to tridiagonal form. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tred2 by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Complex scalar division X/Y. + + Real part of X + Imaginary part of X + Real part of Y + Imaginary part of Y + Division result as a number. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
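The A = V*D*V' relationship in the remarks can be checked directly for a symmetric matrix; a sketch (the Evd entry point and the EigenVectors and D property names are assumed from the 3.x API):

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var A = Matrix<double>.Build.DenseOfArray(new[,]
    {
        { 2.0, 1.0 },
        { 1.0, 2.0 }
    });

    var evd = A.Evd();
    var V = evd.EigenVectors;   // orthogonal for a symmetric matrix
    var D = evd.D;              // block-diagonal eigenvalue matrix (diagonal here: 1 and 3)

    var reconstructed = V * D * V.Transpose();
    Console.WriteLine((A - reconstructed).FrobeniusNorm());  // ~0 up to rounding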
+ + + + + Initializes a new instance of the class. This object creates an orthogonal matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Double value z1 + Double value z2 + Result multiplication of signum function and absolute value + + + + Swap column and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + Scalar "c" value + Scalar "s" value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + float version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. 
+ The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. 
+ The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
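The example element that the sentence above refers to is not shown in this rendering of the documentation. Purely as an illustration, here is a minimal C# sketch, assuming the MathNet.Numerics 3.x API (`Matrix<double>.Build`, `Iterator<T>` and the stop criteria in `MathNet.Numerics.LinearAlgebra.Solvers`, and `BiCgStab`/`DiagonalPreconditioner` in `MathNet.Numerics.LinearAlgebra.Double.Solvers`); the `Solve(matrix, input, result, iterator, preconditioner)` call mirrors the parameters documented below.

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    // Small non-symmetric system A*x = b.
    var A = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 4.0, 1.0, 0.0 },
        { 2.0, 5.0, 1.0 },
        { 0.0, 1.0, 3.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var x = Vector<double>.Build.Dense(A.ColumnCount);   // result vector, filled in place

    // Stop after 1000 iterations or once the residual drops below 1e-10.
    var monitor = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10));

    // As the remarks note, the preconditioner choice matters; a diagonal
    // (Jacobi) preconditioner is the simplest non-trivial choice.
    new BiCgStab().Solve(A, b, x, monitor, new DiagonalPreconditioner());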
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
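No example for the composite solver is shown here either, and its solver-setup types are not documented in this part of the file, so the sketch below deliberately does not use the composite class itself; it hand-rolls the same idea for illustration only: try a cheap solver first, fall back to a more robust one, giving each attempt a fresh iterator (reusing `A` and `b` from the BiCGStab sketch above).

    // Illustration of the composite idea only; not the library's composite solver class.
    var solvers = new IIterativeSolver<double>[] { new TFQMR(), new BiCgStab() };
    var x = Vector<double>.Build.Dense(A.ColumnCount);

    foreach (var solver in solvers)
    {
        var monitor = new Iterator<double>(
            new IterationCountStopCriterion<double>(500),
            new ResidualStopCriterion<double>(1e-10));

        solver.Solve(A, b, x, monitor, new UnitPreconditioner<double>());

        if (monitor.Status == IterationStatus.Converged)
        {
            break;   // first solver that reaches the residual target wins
        }
    }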
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
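The GPBiCG example is likewise not reproduced here. A minimal sketch, assuming the solver is exposed as `GpBiCg` in `MathNet.Numerics.LinearAlgebra.Double.Solvers` with the same `Solve(matrix, input, result, iterator, preconditioner)` shape documented below; the BiCGStab/GPBiCG switching counts described below are left at their defaults (`A` and `b` as in the BiCGStab sketch).

    var gpBiCg = new GpBiCg();   // switches internally between BiCGStab and GPBiCG steps
    var x = Vector<double>.Build.Dense(A.ColumnCount);

    gpBiCg.Solve(A, b, x,
        new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10)),
        new DiagonalPreconditioner());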
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
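A usage sketch for the ILU(0) preconditioner, assuming it is exposed as `ILU0Preconditioner` in `MathNet.Numerics.LinearAlgebra.Double.Solvers` (that class name is not confirmed by this file). ILU(0) keeps the sparsity pattern of A, so a sparse matrix is used; the solver is expected to initialize the preconditioner with the system matrix before approximating.

    // Same system as above, but stored sparsely; ILU(0) reuses A's sparsity pattern.
    var A = Matrix<double>.Build.SparseOfArray(new double[,]
    {
        { 4.0, 1.0, 0.0 },
        { 2.0, 5.0, 1.0 },
        { 0.0, 1.0, 3.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var x = Vector<double>.Build.Dense(3);

    var ilu0 = new ILU0Preconditioner();   // class name assumed, see note above
    new BiCgStab().Solve(A, b, x,
        new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10)),
        ilu0);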
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
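For the drop-tolerance/pivoting variant, a sketch assuming the class is exposed as `ILUTPPreconditioner` (name not confirmed here). The three settings correspond to the FillLevel, DropTolerance and PivotTolerance members documented below; the values shown are arbitrary illustrations, not recommendations (`A`, `b`, `x` as in the ILU(0) sketch).

    var ilutp = new ILUTPPreconditioner   // class name assumed, see note above
    {
        FillLevel = 10.0,       // allowed fill, as a fraction/multiple of A's non-zero count
        DropTolerance = 1e-4,   // entries with absolute value below this are dropped
        PivotTolerance = 0.5    // > 0 enables partial pivoting; 0.0 (the default) disables it
    };

    new BiCgStab().Solve(A, b, x,
        new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10)),
        ilutp);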
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal of Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
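Again the referenced example is not shown here; a minimal sketch, assuming the solver is exposed as `MlkBiCgStab` in `MathNet.Numerics.LinearAlgebra.Double.Solvers`. The number of Lanczos starting vectors discussed below is left at its default (`A` and `b` as in the BiCGStab sketch).

    var mlk = new MlkBiCgStab();   // class name assumed; default number of starting vectors
    var x = Vector<double>.Build.Dense(A.ColumnCount);

    mlk.Solve(A, b, x,
        new Iterator<double>(
            new IterationCountStopCriterion<double>(1000),
            new ResidualStopCriterion<double>(1e-10)),
        new DiagonalPreconditioner());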
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
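The TFQMR example is missing from this rendering as well. The sketch below uses the `SolveIterative` convenience method on `Matrix<T>` as found in MathNet.Numerics 3.x (an assumption; if that overload is unavailable, the explicit `Solve(A, b, x, iterator, preconditioner)` pattern from the BiCGStab sketch applies unchanged, reusing `A` and `b` from there).

    // TFQMR is transpose-free: it only needs products A*v, never transpose(A)*v.
    var tfqmr = new TFQMR();
    Vector<double> x = A.SolveIterative(b, tfqmr,
        new IterationCountStopCriterion<double>(1000),
        new ResidualStopCriterion<double>(1e-10));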
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Evaluates whether this matrix is symmetric. + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. 
+ This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . 
+ + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a float sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n,n,..', '(n,n,..)', '[n,n,...]', where n is a float. + + + A float sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a real sparse vector to float-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a real vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + float version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. 
+ + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. 
+ + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. 
+ Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. 
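The many "Create a new dense matrix ..." entries correspond to the `DenseMatrix.OfXxx` factory methods and the `Matrix<T>.Build` builder. A short sketch of the most common construction paths, assuming MathNet.Numerics 3.x and the double-precision `DenseMatrix`:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseMatrixConstruction
{
    static void Main()
    {
        // Copy of a two-dimensional array (independent memory block).
        var a = DenseMatrix.OfArray(new double[,] { { 1, 2 }, { 3, 4 } });

        // Column-major raw array bound directly (no copy): changes are shared.
        var raw = new double[] { 1, 3, 2, 4 };          // columns (1,3) and (2,4)
        var bound = new DenseMatrix(2, 2, raw);

        // Builder-based construction: zeros, init function, identity.
        var m = Matrix<double>.Build;
        var zeros = m.Dense(3, 3);
        var hilbert = m.Dense(3, 3, (i, j) => 1.0 / (i + j + 1));
        var identity = m.DenseIdentity(3);

        // Copy of column vectors.
        var c0 = Vector<double>.Build.DenseOfArray(new[] { 1.0, 0.0 });
        var c1 = Vector<double>.Build.DenseOfArray(new[] { 0.0, 1.0 });
        var fromColumns = DenseMatrix.OfColumnVectors(c0, c1);
    }
}
```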
+ + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. 
+ The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. 
+ + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. 
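The distinction made above between constructors that copy and the constructor that binds directly to a raw array matters in practice. A brief sketch, again assuming MathNet.Numerics 3.x and the double-precision `DenseVector`:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra.Double;

class DenseVectorBinding
{
    static void Main()
    {
        var data = new double[] { 1, 2, 3 };

        // Binds directly to 'data': no copy, changes are visible both ways.
        var bound = new DenseVector(data);
        data[0] = 42;
        Console.WriteLine(bound[0]);            // 42

        // OfArray copies: the vector is independent of the source array.
        var copied = DenseVector.OfArray(data);
        data[1] = -7;
        Console.WriteLine(copied[1]);           // still 2

        // Operators allocate new vectors; the overloads documented above that
        // take a result vector write into an existing vector instead.
        var sum = bound + copied;
        Console.WriteLine(sum.Sum());
    }
}
```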
+ + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. 
The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. 
+ + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the Frobenius norm of this matrix. + The Frobenius norm of this matrix. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. 
+ + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. 
+ If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. 
+ Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The type of QR factorization to perform. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. 
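The factorization classes documented above (Cholesky, LU, QR, SVD, EVD) share a common call pattern: the decomposition is computed at construction time and `Solve` accepts either a right-hand-side vector or matrix. A hedged illustration, assuming MathNet.Numerics 3.x and the double-precision API rather than the complex variants some of these entries describe:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class FactorizeAndSolve
{
    static void Main()
    {
        // Symmetric positive definite system, so Cholesky applies.
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 4, 1, 0 },
            { 1, 3, 1 },
            { 0, 1, 2 }
        });
        var b = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

        // Factorizations are computed at construction time and cached.
        var cholesky = a.Cholesky();
        var xChol = cholesky.Solve(b);          // solves A x = b

        var lu = a.LU();
        var xLu = lu.Solve(b);

        var qr = a.QR();
        var xQr = qr.Solve(b);

        // All three should agree up to rounding error.
        Console.WriteLine((xChol - xLu).L2Norm());
        Console.WriteLine((xChol - xQr).L2Norm());

        // LU also provides the inverse, calculated via the decomposition.
        var inverse = lu.Inverse();
        Console.WriteLine((a * inverse - Matrix<double>.Build.DenseIdentity(3)).FrobeniusNorm());
    }
}
```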
+ + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. 
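The scalar properties documented in this block (determinant, effective numerical rank, condition number, full-rank flag, two-norm) are read directly off the factorization objects. A sketch under the same MathNet.Numerics 3.x assumption; the property names follow the descriptions above:

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class FactorizationProperties
{
    static void Main()
    {
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 2, 0, 1 },
            { 0, 3, 0 },
            { 1, 0, 2 }
        });

        // Determinant via the pivoted LU factorization (P*A = L*U).
        Console.WriteLine(a.LU().Determinant);       // 9

        var svd = a.Svd();
        Console.WriteLine(svd.Rank);                 // effective numerical rank: 3
        Console.WriteLine(svd.ConditionNumber);      // max(S) / min(S) = 3
        Console.WriteLine(svd.L2Norm);               // largest singular value = 3

        var qr = a.QR();
        Console.WriteLine(qr.IsFullRank);            // true
    }
}
```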
+ + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. 
The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. 
+ + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. 
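Because the QR factorization described here accepts rectangular matrices with at least as many rows as columns, it is the usual route to least-squares solutions; the SVD offers an alternative path through the same `Solve` API. A hedged sketch (MathNet.Numerics 3.x assumed):

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

class LeastSquaresSketch
{
    static void Main()
    {
        // Overdetermined system: 4 equations, 2 unknowns (fit y = c0 + c1 * t).
        var a = Matrix<double>.Build.DenseOfArray(new double[,]
        {
            { 1, 0 },
            { 1, 1 },
            { 1, 2 },
            { 1, 3 }
        });
        var y = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.1, 2.9, 4.2 });

        // QR-based least squares: minimizes ||A x - y||_2.
        var xQr = a.QR().Solve(y);

        // SVD-based least squares (singular vectors are computed by default).
        var xSvd = a.Svd().Solve(y);

        Console.WriteLine(xQr);
        Console.WriteLine((xQr - xSvd).L2Norm());    // ~0
    }
}
```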
+ + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + + + + + Calculates absolute value of multiplied on signum function of + + Complex value z1 + Complex value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. 
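For the complex-valued matrix members described here (conjugation, conjugate transpose, Hermitian test), a minimal sketch, assuming the `System.Numerics.Complex`-based types in MathNet.Numerics 3.x:

```csharp
using System;
using System.Numerics;
using MathNet.Numerics.LinearAlgebra;

class ComplexMatrixSketch
{
    static void Main()
    {
        // A 2x2 Hermitian matrix: equal to its own conjugate transpose.
        var h = Matrix<Complex>.Build.DenseOfArray(new Complex[,]
        {
            { new Complex(2, 0), new Complex(1, -1) },
            { new Complex(1, 1), new Complex(3, 0) }
        });

        Console.WriteLine(h.IsHermitian());              // True
        Console.WriteLine(h.IsSymmetric());              // False (off-diagonals differ)

        var ct = h.ConjugateTranspose();                 // equals h for a Hermitian matrix
        Console.WriteLine((h - ct).FrobeniusNorm());     // 0

        var conj = h.Conjugate();                        // element-wise complex conjugate
        Console.WriteLine(conj[0, 1]);                   // (1, 1)
    }
}
```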
+ + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
+ + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
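The usage example referred to just above is not reproduced in this generated documentation, so the following is a minimal sketch of the call pattern the BiCgStab solver documents here: coefficient matrix A, right-hand side b, result vector x, an iterator built from stop criteria, and a preconditioner. It assumes the MathNet.Numerics 3.x namespaces `MathNet.Numerics.LinearAlgebra`, `MathNet.Numerics.LinearAlgebra.Solvers` and `MathNet.Numerics.LinearAlgebra.Double.Solvers`, and that the diagonal preconditioner described further down is exposed as `DiagonalPreconditioner`; treat it as an illustration, not the library's official sample.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class BiCgStabExample
{
    static void Main()
    {
        // A small non-symmetric system A x = b.
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 4.0, 1.0, 0.0 },
            { 2.0, 5.0, 1.0 },
            { 0.0, 1.0, 3.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
        var x = Vector<double>.Build.Dense(b.Count); // result vector, filled by the solver

        // Stop when the residual is small enough or after a fixed iteration count.
        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(1000));

        // Diagonal preconditioner: uses the inverse of the matrix diagonal.
        var preconditioner = new DiagonalPreconditioner();

        var solver = new BiCgStab();
        solver.Solve(a, b, x, iterator, preconditioner);

        System.Console.WriteLine(x);
    }
}
```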
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
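As with BiCgStab, the GPBiCG sample itself is not included in this generated file. A possible usage sketch follows, assuming the solver class is named `GpBiCg` and that this MathNet.Numerics version provides the `SolveIterative` convenience method on `Matrix<T>`, which allocates the result vector and wraps the stop criteria in an iterator; both names are assumptions, not confirmed by the text above.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class GpBiCgExample
{
    static void Main()
    {
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 3.0, 1.0, 0.0 },
            { 1.0, 4.0, 2.0 },
            { 0.0, 2.0, 5.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 4.0, 7.0, 7.0 });

        // SolveIterative builds the iterator from the stop criteria and
        // returns a freshly allocated result vector.
        var x = a.SolveIterative(b, new GpBiCg(),
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(1000));

        System.Console.WriteLine(x);
    }
}
```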
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
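A hedged sketch of how the ILU(0) preconditioner described above might be plugged into one of the iterative solvers. The class name `ILU0Preconditioner` is an assumption; the `Initialize(matrix)` call and the solver's `Solve(matrix, input, result, iterator, preconditioner)` hand-off follow the preconditioner and solver signatures documented in this file.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class Ilu0Example
{
    static void Main()
    {
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 10.0,  1.0,  0.0 },
            {  2.0, 12.0,  3.0 },
            {  0.0,  4.0, 15.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
        var x = Vector<double>.Build.Dense(b.Count);

        // Build the combined L/U factors from the (square) system matrix.
        var preconditioner = new ILU0Preconditioner();
        preconditioner.Initialize(a);

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(500));

        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        System.Console.WriteLine(x);
    }
}
```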
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
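A speculative usage sketch for the drop-tolerance ILUTP preconditioner. The class name `ILUTPPreconditioner` is assumed; the `FillLevel`, `DropTolerance` and `PivotTolerance` properties are the ones documented above, and the values shown are arbitrary.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class IlutpExample
{
    static void Main()
    {
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 10.0,  1.0,  0.0,  0.0 },
            {  2.0, 12.0,  3.0,  0.0 },
            {  0.0,  4.0, 15.0,  5.0 },
            {  0.0,  0.0,  6.0, 20.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0, 4.0 });
        var x = Vector<double>.Build.Dense(b.Count);

        // Tuning knobs documented above: allowed fill relative to the original
        // non-zero count, the absolute drop tolerance, and the pivot tolerance
        // (0.0 disables pivoting).
        var preconditioner = new ILUTPPreconditioner
        {
            FillLevel = 10.0,
            DropTolerance = 1e-4,
            PivotTolerance = 0.0
        };

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(500));

        new BiCgStab().Solve(a, b, x, iterator, preconditioner);
        System.Console.WriteLine(x);
    }
}
```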
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors +
+ Man-chung Yeung and Tony F. Chan +
+ SIAM Journal on Scientific Computing +
+ Volume 21, Number 4, pp. 1263 - 1290 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
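The ML(k)-BiCGStab example is likewise missing from this generated file. A rough sketch, assuming the solver class is named `MlkBiCgStab` and reusing the diagonal preconditioner from the earlier sketches:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class MlkBiCgStabExample
{
    static void Main()
    {
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 5.0, 2.0, 0.0 },
            { 1.0, 6.0, 2.0 },
            { 0.0, 1.0, 4.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 7.0, 9.0, 5.0 });
        var x = Vector<double>.Build.Dense(b.Count);

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(1000));

        var solver = new MlkBiCgStab();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        System.Console.WriteLine(x);
    }
}
```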
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vecrors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative methods for sparse linear systems. +
+ Yousef Saad +
+ Algorithm is described in Chapter 7, section 7.4.3, page 219 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
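Finally, a sketch for the TFQMR solver, again assuming the `TFQMR` class name and the `Solve(matrix, input, result, iterator, preconditioner)` signature documented above:

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Solvers;
using MathNet.Numerics.LinearAlgebra.Double.Solvers;

class TfqmrExample
{
    static void Main()
    {
        var a = Matrix<double>.Build.SparseOfArray(new double[,]
        {
            { 6.0, 1.0, 0.0 },
            { 2.0, 7.0, 1.0 },
            { 0.0, 3.0, 8.0 }
        });
        var b = Vector<double>.Build.Dense(new[] { 7.0, 10.0, 11.0 });
        var x = Vector<double>.Build.Dense(b.Count);

        var iterator = new Iterator<double>(
            new ResidualStopCriterion<double>(1e-10),
            new IterationCountStopCriterion<double>(1000));

        var solver = new TFQMR();
        solver.Solve(a, b, x, iterator, new DiagonalPreconditioner());

        System.Console.WriteLine(x);
    }
}
```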
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. + + The index of maximum element. 
+ + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + A Matrix class with dense storage. The underlying storage is a one dimensional array in column-major order (column by column). + + + + + Number of rows. + + Using this instead of the RowCount property to speed up calculating + a matrix index in the data array. + + + + Number of columns. + + Using this instead of the ColumnCount property to speed up calculating + a matrix index in the data array. + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Gets the matrix's data. + + The matrix's data. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. 
+ + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of add + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Computes the trace of this matrix. 
+ + The trace of this matrix + If the matrix is not square + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A vector using dense storage. + + + + + Number of elements + + + + + Gets the vector's data. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new dense vector directly binding to a raw array. + The array is used directly without copying. + Very efficient, but changes to the array and the vector will affect each other. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Gets the vector's data. + + The vector's data. + + + + Returns a reference to the internal data structure. + + The DenseVector whose internal data we are + returning. + + A reference to the internal date of the given vector. + + + + + Returns a vector bound directly to a reference of the provided array. + + The array to bind to the DenseVector object. + + A DenseVector whose values are bound to the given array. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts another vector from this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Multiplies a vector with a complex. + + The vector to scale. + The Complex32 value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The Complex32 value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The Complex32 value. + The result of the division. + If is . + + + + Returns the index of the absolute minimum element. 
+ + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Creates a Complex32 dense vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a double. + + + A Complex32 dense vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex dense vector to double-precision dense vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + A matrix type for diagonal matrices. + + + Diagonal matrices can be non-square matrices but the diagonal always starts + at element 0,0. A diagonal matrix will throw an exception if non diagonal + entries are set. The exception to this is when the off diagonal elements are + 0.0 or NaN; these settings will cause no change to the diagonal matrix. + + + + + Gets the matrix's data. + + The matrix's data. + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. 
+ Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns. + All diagonal cells of the matrix will be initialized to the provided value, all non-diagonal ones to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to contain the diagonal elements only and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + The matrix to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + The array to copy from must be diagonal as well. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value from the provided enumerable. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Create a new diagonal matrix with diagonal values sampled from the provided random distribution. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. 
+ The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the division. + + + + Computes the determinant of this matrix. + + The determinant of this matrix. + + + + Returns the elements of the diagonal in a . + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + Calculates the condition number of this matrix. + The condition number of the matrix. + + + Computes the inverse of this matrix. + If is not a square matrix. + If is singular. + The inverse of this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + Always thrown + Permutation in diagonal matrix are senseless, because of matrix nature + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for dense matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. 
A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. + + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + Data array of matrix V (eigenvectors) + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + Data array of matrix V (eigenvectors) + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + Data array of the eigenvectors + Data array of matrix V (eigenvectors) + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. 
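The QR factorizations documented in this block (Householder and modified Gram-Schmidt variants) expose the same Solve surface regardless of element type. A minimal sketch in double precision, assuming the Matrix<double>.QR() method and the R/Solve members of the returned factorization; for a system with more rows than columns the solve is in the least-squares sense:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class QrSketch
    {
        static void Main()
        {
            // Overdetermined system Ax = b (4 equations, 2 unknowns).
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 1.0, 1.0 },
                { 1.0, 2.0 },
                { 1.0, 3.0 },
                { 1.0, 4.0 }
            });
            var b = Vector<double>.Build.Dense(new[] { 6.0, 5.0, 7.0, 10.0 });

            // The decomposition is computed once, at construction time, and cached.
            var qr = a.QR();

            Console.WriteLine(qr.R);         // upper triangular factor
            Console.WriteLine(qr.Solve(b));  // least-squares solution of Ax = b
        }
    }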
+ + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Factorize matrix using the modified Gram-Schmidt method. + + Initial matrix. On exit is replaced by Q. + Number of rows in Q. + Number of columns in Q. + On exit is filled by R. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Gets or sets Tau vector. Contains additional information on Q - used for native solver. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + If row count is less then column count + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. 
This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. + If SVD algorithm failed to converge with matrix . + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is peformed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. 
+ + true if the matrix is full rank; otherwise false. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + A class which encapsulates the functionality of a Cholesky factorization for user matrices. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + + + + Initializes a new instance of the class. This object will compute the + Cholesky factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + If is not positive definite. + + + + Calculate Cholesky step + + Factor matrix + Number of rows + Column start + Total columns + Multipliers calculated previously + Number of available processors + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a complex matrix. + + + If A is hermitan, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is hermitan. + I.e. A = V*D*V' and V*VH=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + + + + Initializes a new instance of the class. This object will compute the + the eigenvalue decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + If it is known whether the matrix is symmetric or not the routine can skip checking it itself. + If is null. + If EVD algorithm failed to converge with matrix . + + + + Reduces a complex hermitian matrix to a real symmetric tridiagonal matrix using unitary similarity transformations. 
+ + Source matrix to reduce + Output: Arrays for internal storage of real parts of eigenvalues + Output: Arrays for internal storage of imaginary parts of eigenvalues + Output: Arrays that contains further information about the transformations. + Order of initial matrix + This is derived from the Algol procedures HTRIDI by + Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Symmetric tridiagonal QL algorithm. + + The eigen vectors to work on. + Arrays for internal storage of real parts of eigenvalues + Arrays for internal storage of imaginary parts of eigenvalues + Order of initial matrix + This is derived from the Algol procedures tql2, by + Bowdler, Martin, Reinsch, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + + Determines eigenvectors by undoing the symmetric tridiagonalize transformation + + The eigen vectors to work on. + Previously tridiagonalized matrix by . + Contains further information about the transformations + Input matrix order + This is derived from the Algol procedures HTRIBK, by + by Smith, Boyle, Dongarra, Garbow, Ikebe, Klema, Moler, and Wilkinson, Handbook for + Auto. Comp., Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Nonsymmetric reduction to Hessenberg form. + + The eigen vectors to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedures orthes and ortran, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutines in EISPACK. + + + + Nonsymmetric reduction from Hessenberg to real Schur form. + + The eigen vectors to work on. + The eigen values to work on. + Array for internal storage of nonsymmetric Hessenberg form. + Order of initial matrix + This is derived from the Algol procedure hqr2, + by Martin and Wilkinson, Handbook for Auto. Comp., + Vol.ii-Linear Algebra, and the corresponding + Fortran subroutine in EISPACK. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any complex square matrix A may be decomposed as A = QR where Q is an unitary mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + + + + Initializes a new instance of the class. This object creates an unitary matrix + using the modified Gram-Schmidt method. + + The matrix to factor. + If is null. + If row count is less then column count + If is rank deficient + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. 
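For the LU factorization summarized above, a minimal sketch assuming the Matrix<double>.LU() method and the L, U, Determinant, Solve and Inverse members it documents:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class LuSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 3.0 },
                { 6.0, 3.0 }
            });
            var b = Vector<double>.Build.Dense(new[] { 10.0, 12.0 });

            // Factorized once in the constructor; partial pivoting gives P*A = L*U.
            var lu = a.LU();

            Console.WriteLine(lu.L);            // lower triangular factor
            Console.WriteLine(lu.U);            // upper triangular factor
            Console.WriteLine(lu.Determinant);  // determinant from the cached factors
            Console.WriteLine(lu.Solve(b));     // solves Ax = b
            Console.WriteLine(lu.Inverse());    // inverse computed via the LU decomposition
        }
    }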
+ + + The computation of the LU factorization is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + LU factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + If is null. + If is not a square matrix. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + + + + + Initializes a new instance of the class. This object will compute the + QR factorization when the constructor is called and cache it's factorization. + + The matrix to factor. + The QR factorization method to use. + If is null. + + + + Generate column from initial matrix to work array + + Initial matrix + The first row + Column index + Generated vector + + + + Perform calculation of Q or R + + Work array + Q or R matrices + The first row + The last row + The first column + The last column + Number of available CPUs + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD) for . + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + + + + Initializes a new instance of the class. This object will compute the + the singular value decomposition when the constructor is called and cache it's decomposition. + + The matrix to factor. + Compute the singular U and VT vectors or not. + If is null. 
+ + + + + Calculates absolute value of multiplied on signum function of + + Complex32 value z1 + Complex32 value z2 + Result multiplication of signum function and absolute value + + + + Interchanges two vectors and + + Source matrix + The number of rows in + Column A index to swap + Column B index to swap + + + + Scale column by starting from row + + Source matrix + The number of rows in + Column to scale + Row to scale from + Scale value + + + + Scale vector by starting from index + + Source vector + Row to scale from + Scale value + + + + Given the Cartesian coordinates (da, db) of a point p, these fucntion return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Calculate Norm 2 of the column in matrix starting from row + + Source matrix + The number of rows in + Column index + Start row index + Norm2 (Euclidean norm) of the column + + + + Calculate Norm 2 of the vector starting from index + + Source vector + Start index + Norm2 (Euclidean norm) of the vector + + + + Calculate dot product of and conjugating the first vector. + + Source matrix + The number of rows in + Index of column A + Index of column B + Starting row index + Dot product value + + + + Performs rotation of points in the plane. Given two vectors x and y , + each vector element of these vectors is replaced as follows: x(i) = c*x(i) + s*y(i); y(i) = c*y(i) - s*x(i) + + Source matrix + The number of rows in + Index of column A + Index of column B + scalar cos value + scalar sin value + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Matrix class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. 
+ If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar to divide by each element of the matrix. + The matrix to store the result of the division. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. 
+ + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + Calculates the induced L1 norm of this matrix. + The maximum absolute column sum of the matrix. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + A Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Bi-Conjugate Gradient Stabilized (BiCGStab) solver is an 'improvement' + of the standard Conjugate Gradient (CG) solver. Unlike the CG solver the + BiCGStab can be used on non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The Bi-CGSTAB algorithm was taken from:
+ Templates for the solution of linear systems: Building blocks + for iterative methods +
+ Richard Barrett, Michael Berry, Tony F. Chan, James Demmel, + June M. Donato, Jack Dongarra, Victor Eijkhout, Roldan Pozo, + Charles Romine and Henk van der Vorst +
+ Url: http://www.netlib.org/templates/Templates.html +
+ Algorithm is described in Chapter 2, section 2.3.8, page 27 +
+ + The example code below provides an indication of the possible use of the + solver. + +
+
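The remark above refers to example code for the BiCGStab solver. A minimal sketch of possible usage, assuming the BiCgStab class in MathNet.Numerics.LinearAlgebra.Double.Solvers together with the Iterator, ResidualStopCriterion and IterationCountStopCriterion types and the SolveIterative convenience method on Matrix<double>:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class BiCgStabSketch
    {
        static void Main()
        {
            // A small non-symmetric system Ax = b.
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 4.0, 1.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 }
            });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });

            // Stop on a small residual or after a fixed iteration budget.
            var monitor = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            var x = a.SolveIterative(b, new BiCgStab(), monitor);

            Console.WriteLine(x);
            Console.WriteLine((b - a * x).L2Norm());  // true residual b - Ax
        }
    }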
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient , A. + The solution , b. + The result , x. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A composite matrix solver. The actual solver is made by a sequence of + matrix solvers. + + + + Solver based on:
+ Faster PDE-based simulations using robust composite linear solvers
+ S. Bhowmick, P. Raghavan, L. McInnes, B. Norris
+ Future Generation Computer Systems, Vol 20, 2004, pp 373–387
+
+ + Note that if an iterator is passed to this solver it will be used for all the sub-solvers. + +
+
+ + + The collection of solvers that will be used + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A diagonal preconditioner. The preconditioner uses the inverse + of the matrix diagonal as preconditioning values. + + + + + The inverse of the matrix diagonal. + + + + + Returns the decomposed matrix diagonal. + + The matrix diagonal. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + A Generalized Product Bi-Conjugate Gradient iterative matrix solver. + + + + The Generalized Product Bi-Conjugate Gradient (GPBiCG) solver is an + alternative version of the Bi-Conjugate Gradient stabilized (CG) solver. + Unlike the CG solver the GPBiCG solver can be used on + non-symmetric matrices.
+ Note that much of the success of the solver depends on the selection of the + proper preconditioner. +
+ + The GPBiCG algorithm was taken from:
+ GPBiCG(m,l): A hybrid of BiCGSTAB and GPBiCG methods with + efficiency and robustness +
+ S. Fujino +
+ Applied Numerical Mathematics, Volume 41, 2002, pp 107 - 117 +
+
+ + The example code below provides an indication of the possible use of the + solver. + +
+
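For the GPBiCG solver referenced above, a minimal sketch driving it through the Solve(matrix, input, result, iterator, preconditioner) call documented in this block; the GpBiCg and DiagonalPreconditioner class names are assumptions based on the MathNet.Numerics.LinearAlgebra.Double.Solvers namespace:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class GpBiCgSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 5.0, 2.0, 0.0 },
                { 1.0, 4.0, 1.0 },
                { 0.0, 2.0, 6.0 }
            });
            var b = Vector<double>.Build.Dense(new[] { 7.0, 6.0, 8.0 });
            var x = Vector<double>.Build.Dense(3);   // result vector, filled in place

            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(500));

            // Solve(matrix, input, result, iterator, preconditioner), as documented above;
            // the diagonal preconditioner uses the inverse of the matrix diagonal.
            new GpBiCg().Solve(a, b, x, iterator, new DiagonalPreconditioner());

            Console.WriteLine(x);
        }
    }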
+ + + Indicates the number of BiCGStab steps should be taken + before switching. + + + + + Indicates the number of GPBiCG steps should be taken + before switching. + + + + + Gets or sets the number of steps taken with the BiCgStab algorithm + before switching over to the GPBiCG algorithm. + + + + + Gets or sets the number of steps taken with the GPBiCG algorithm + before switching over to the BiCgStab algorithm. + + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Decide if to do steps with BiCgStab + + Number of iteration + true if yes, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + An incomplete, level 0, LU factorization preconditioner. + + + The ILU(0) algorithm was taken from:
+ Iterative methods for sparse linear systems
+ Yousef Saad
+ Algorithm is described in Chapter 10, section 10.3.2, page 275
+
+
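A minimal sketch of the ILU(0) preconditioner described above applied to a sparse system, again via the documented Solve(matrix, input, result, iterator, preconditioner) call; the ILU0Preconditioner class name is an assumption based on MathNet.Numerics 3.x:

    using System;
    using System.Collections.Generic;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;
    using MathNet.Numerics.LinearAlgebra.Solvers;

    class Ilu0Sketch
    {
        static void Main()
        {
            // Sparse tridiagonal test matrix (diagonally dominant, so the solve converges).
            const int n = 100;
            var entries = new List<Tuple<int, int, double>>();
            for (int i = 0; i < n; i++)
            {
                entries.Add(Tuple.Create(i, i, 4.0));
                if (i > 0) entries.Add(Tuple.Create(i, i - 1, -1.0));
                if (i < n - 1) entries.Add(Tuple.Create(i, i + 1, -1.0));
            }
            var a = Matrix<double>.Build.SparseOfIndexed(n, n, entries);
            var b = Vector<double>.Build.Dense(n, 1.0);
            var x = Vector<double>.Build.Dense(n);

            var iterator = new Iterator<double>(
                new ResidualStopCriterion<double>(1e-10),
                new IterationCountStopCriterion<double>(1000));

            // ILU(0): incomplete LU restricted to the sparsity pattern of A,
            // used here to precondition BiCGStab.
            new BiCgStab().Solve(a, b, x, iterator, new ILU0Preconditioner());

            Console.WriteLine((b - a * x).L2Norm());
        }
    }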
+ + + The matrix holding the lower (L) and upper (U) matrices. The + decomposition matrices are combined to reduce storage. + + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + A new matrix containing the lower triagonal elements. + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + This class performs an Incomplete LU factorization with drop tolerance + and partial pivoting. The drop tolerance indicates which additional entries + will be dropped from the factorized LU matrices. + + + The ILUTP-Mem algorithm was taken from:
+ ILUTP_Mem: a Space-Efficient Incomplete LU Preconditioner +
+ Tzu-Yi Chen, Department of Mathematics and Computer Science,
+ Pomona College, Claremont CA 91711, USA
+ Published in:
+ Lecture Notes in Computer Science
+ Volume 3046 / 2004
+ pp. 20 - 28
+ Algorithm is described in Section 2, page 22 +
+
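The ILUTP preconditioner above is configured by a fill level, a drop tolerance and a pivot tolerance, and is driven through the usual Initialize/Approximate preconditioner interface. A minimal sketch; the ILUTPPreconditioner class name and the order of its three constructor arguments are assumptions based on the settings documented below:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    class IlutpSketch
    {
        static void Main()
        {
            // A sparse matrix is preferred, since the preconditioner stores its data sparsely.
            var a = Matrix<double>.Build.SparseOfArray(new[,]
            {
                {  4.0, -1.0,  0.0 },
                { -1.0,  4.0, -1.0 },
                {  0.0, -1.0,  4.0 }
            });
            var r = Vector<double>.Build.Dense(new[] { 1.0, 1.0, 1.0 });
            var z = Vector<double>.Build.Dense(3);

            // fill level, drop tolerance, pivot tolerance (see the property docs below);
            // changing them afterwards requires re-initializing the preconditioner.
            var ilutp = new ILUTPPreconditioner(10.0, 1e-4, 0.5);

            ilutp.Initialize(a);       // factorize once for the given matrix
            ilutp.Approximate(r, z);   // z approximately equals inv(A) * r, as used inside a Krylov solver

            Console.WriteLine(z);
        }
    }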
+ + + The default fill level. + + + + + The default drop tolerance. + + + + + The decomposed upper triangular matrix. + + + + + The decomposed lower triangular matrix. + + + + + The array containing the pivot values. + + + + + The fill level. + + + + + The drop tolerance. + + + + + The pivot tolerance. + + + + + Initializes a new instance of the class with the default settings. + + + + + Initializes a new instance of the class with the specified settings. + + + The amount of fill that is allowed in the matrix. The value is a fraction of + the number of non-zero entries in the original matrix. Values should be positive. + + + The absolute drop tolerance which indicates below what absolute value an entry + will be dropped from the matrix. A drop tolerance of 0.0 means that no values + will be dropped. Values should always be positive. + + + The pivot tolerance which indicates at what level pivoting will take place. A + value of 0.0 means that no pivoting will take place. + + + + + Gets or sets the amount of fill that is allowed in the matrix. The + value is a fraction of the number of non-zero entries in the original + matrix. The standard value is 200. + + + + Values should always be positive and can be higher than 1.0. A value lower + than 1.0 means that the eventual preconditioner matrix will have fewer + non-zero entries as the original matrix. A value higher than 1.0 means that + the eventual preconditioner can have more non-zero values than the original + matrix. + + + Note that any changes to the FillLevel after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the absolute drop tolerance which indicates below what absolute value + an entry will be dropped from the matrix. The standard value is 0.0001. + + + + The values should always be positive and can be larger than 1.0. A low value will + keep more small numbers in the preconditioner matrix. A high value will remove + more small numbers from the preconditioner matrix. + + + Note that any changes to the DropTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Gets or sets the pivot tolerance which indicates at what level pivoting will + take place. The standard value is 0.0 which means pivoting will never take place. + + + + The pivot tolerance is used to calculate if pivoting is necessary. Pivoting + will take place if any of the values in a row is bigger than the + diagonal value of that row divided by the pivot tolerance, i.e. pivoting + will take place if row(i,j) > row(i,i) / PivotTolerance for + any j that is not equal to i. + + + Note that any changes to the PivotTolerance after creating the preconditioner + will invalidate the created preconditioner and will require a re-initialization of + the preconditioner. + + + Thrown if a negative value is provided. + + + + Returns the upper triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the upper triagonal elements. + + + + Returns the lower triagonal matrix that was created during the LU decomposition. + + + This method is used for debugging purposes only and should normally not be used. + + A new matrix containing the lower triagonal elements. 
+ + + + Returns the pivot array. This array is not needed for normal use because + the preconditioner will return the solution vector values in the proper order. + + + This method is used for debugging purposes only and should normally not be used. + + The pivot array. + + + + Initializes the preconditioner and loads the internal data structures. + + + The upon which this preconditioner is based. Note that the + method takes a general matrix type. However internally the data is stored + as a sparse matrix. Therefore it is not recommended to pass a dense matrix. + + If is . + If is not a square matrix. + + + + Pivot elements in the according to internal pivot array + + Row to pivot in + + + + Was pivoting already performed + + Pivots already done + Current item to pivot + true if performed, otherwise false + + + + Swap columns in the + + Source . + First column index to swap + Second column index to swap + + + + Sort vector descending, not changing vector but placing sorted indicies to + + Start sort form + Sort till upper bound + Array with sorted vector indicies + Source + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Pivot elements in according to internal pivot array + + Source . + Result after pivoting. + + + + An element sort algorithm for the class. + + + This sort algorithm is used to sort the columns in a sparse matrix based on + the value of the element on the diagonal of the matrix. + + + + + Sorts the elements of the vector in decreasing + fashion. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Sorts the elements of the vector in decreasing + fashion using heap sort algorithm. The vector itself is not affected. + + The starting index. + The stopping index. + An array that will contain the sorted indices once the algorithm finishes. + The that contains the values that need to be sorted. + + + + Build heap for double indicies + + Root position + Length of + Indicies of + Target + + + + Sift double indicies + + Indicies of + Target + Root position + Length of + + + + Sorts the given integers in a decreasing fashion. + + The values. + + + + Sort the given integers in a decreasing fashion using heapsort algorithm + + Array of values to sort + Length of + + + + Build heap + + Target values array + Root position + Length of + + + + Sift values + + Target value array + Root position + Length of + + + + Exchange values in array + + Target values array + First value to exchange + Second value to exchange + + + + A simple milu(0) preconditioner. + + + Original Fortran code by Youcef Saad (07 January 2004) + + + + Use modified or standard ILU(0) + + + + Gets or sets a value indicating whether to use modified or standard ILU(0). + + + + + Gets a value indicating whether the preconditioner is initialized. + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix upon which the preconditioner is based. + If is . + If is not a square or is not an + instance of SparseCompressedRowMatrixStorage. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector b. + The left hand side vector x. + + + + MILU0 is a simple milu(0) preconditioner. + + Order of the matrix. + Matrix values in CSR format (input). + Column indices (input). 
+ Row pointers (input). + Matrix values in MSR format (output). + Row pointers and column indices (output). + Pointer to diagonal elements (output). + True if the modified/MILU algorithm should be used (recommended) + Returns 0 on success or k > 0 if a zero pivot was encountered at step k. + + + + A Multiple-Lanczos Bi-Conjugate Gradient stabilized iterative matrix solver. + + + + The Multiple-Lanczos Bi-Conjugate Gradient stabilized (ML(k)-BiCGStab) solver is an 'improvement' + of the standard BiCgStab solver. + + + The algorithm was taken from:
+ ML(k)BiCGSTAB: A BiCGSTAB variant based on multiple Lanczos starting vectors
+ Man-chung Yeung and Tony F. Chan
+ SIAM Journal on Scientific Computing
+ Volume 21, Number 4, pp. 1263-1290
+ The example code below provides an indication of the possible use of the
+ solver.
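+ The example itself is not reproduced in this file; as an indication, here is a hedged sketch
+ assuming the MathNet.Numerics 3.x double-precision API (MlkBiCgStab, Iterator,
+ IterationCountStopCriterion, ResidualStopCriterion and MILU0Preconditioner are names assumed
+ from that library):
+
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     // A small, diagonally dominant system A x = b.
+     var A = SparseMatrix.OfArray(new double[,] { { 4, 1, 0 }, { 1, 4, 1 }, { 0, 1, 4 } });
+     var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
+     var x = Vector<double>.Build.Dense(3);
+
+     // Stop after 1000 iterations or once the residual drops below 1e-10.
+     var iterator = new Iterator<double>(
+         new IterationCountStopCriterion<double>(1000),
+         new ResidualStopCriterion<double>(1e-10));
+
+     // Solve(matrix, input, result, iterator, preconditioner) matches the signature documented below.
+     var solver = new MlkBiCgStab();
+     solver.Solve(A, b, x, iterator, new MILU0Preconditioner());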
+ + + The default number of starting vectors. + + + + + The collection of starting vectors which are used as the basis for the Krylov sub-space. + + + + + The number of starting vectors used by the algorithm + + + + + Gets or sets the number of starting vectors. + + + Must be larger than 1 and smaller than the number of variables in the matrix that + for which this solver will be used. + + + + + Resets the number of starting vectors to the default value. + + + + + Gets or sets a series of orthonormal vectors which will be used as basis for the + Krylov sub-space. + + + + + Gets the number of starting vectors to create + + Maximum number + Number of variables + Number of starting vectors to create + + + + Returns an array of starting vectors. + + The maximum number of starting vectors that should be created. + The number of variables. + + An array with starting vectors. The array will never be larger than the + but it may be smaller if + the is smaller than + the . + + + + + Create random vectors array + + Number of vectors + Size of each vector + Array of random vectors + + + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Source A. + Residual data. + x data. + b data. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Transpose Free Quasi-Minimal Residual (TFQMR) iterative matrix solver. + + + + The TFQMR algorithm was taken from:
+ Iterative Methods for Sparse Linear Systems
+ Yousef Saad
+ The algorithm is described in Chapter 7, Section 7.4.3, page 219
+ The example code below provides an indication of the possible use of the
+ solver.
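+ The referenced example is likewise not reproduced here; a hedged sketch assuming the
+ MathNet.Numerics 3.x double-precision API follows (TFQMR and UnitPreconditioner are names
+ assumed from that library):
+
+     using MathNet.Numerics.LinearAlgebra;
+     using MathNet.Numerics.LinearAlgebra.Double;
+     using MathNet.Numerics.LinearAlgebra.Double.Solvers;
+     using MathNet.Numerics.LinearAlgebra.Solvers;
+
+     var A = SparseMatrix.OfArray(new double[,] { { 5, 2, 0 }, { 2, 5, 1 }, { 0, 1, 5 } });
+     var b = Vector<double>.Build.Dense(new[] { 1.0, 0.0, 1.0 });
+     var x = Vector<double>.Build.Dense(3);
+
+     var iterator = new Iterator<double>(
+         new IterationCountStopCriterion<double>(1000),
+         new ResidualStopCriterion<double>(1e-10));
+
+     // The identity ("unit") preconditioner leaves the system unchanged; swap in an
+     // ILU-type preconditioner for harder systems.
+     var solver = new TFQMR();
+     solver.Solve(A, b, x, iterator, new UnitPreconditioner<double>());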
+ + + Calculates the true residual of the matrix equation Ax = b according to: residual = b - Ax + + Instance of the A. + Residual values in . + Instance of the x. + Instance of the b. + + + + Is even? + + Number to check + true if even, otherwise false + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + A Matrix with sparse storage, intended for very large matrices where most of the cells are zero. + The underlying storage scheme is 3-array compressed-sparse-row (CSR) Format. + Wikipedia - CSR. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new square sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the order is less than one. + + + + Create a new sparse matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + If the row or column count is less than one. + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new square sparse identity matrix where each diagonal value is set to One. + + + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . 
+ If the result matrix's dimensions are not the same as this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract to this matrix. + The matrix to store the result of subtraction. + If the other matrix is . + If the two matrices don't have the same dimensions. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The matrix to pointwise divide this one by. + The matrix to store the result of the pointwise division. 
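+ A short sketch of the triangle, norm and pointwise operations documented above, shown with the
+ double-typed sparse matrix for brevity (the builder call SparseOfIndexed is an assumption from
+ the MathNet.Numerics 3.x API; the complex-valued matrix follows the same pattern):
+
+     using System;
+     using MathNet.Numerics.LinearAlgebra;
+
+     // Build from (row, column, value) triplets; omitted entries are zero.
+     var A = Matrix<double>.Build.SparseOfIndexed(3, 3, new[]
+     {
+         Tuple.Create(0, 0, 2.0),
+         Tuple.Create(1, 1, 3.0),
+         Tuple.Create(2, 0, 1.0),
+         Tuple.Create(2, 2, 4.0)
+     });
+
+     var lower  = A.LowerTriangle();          // includes the diagonal
+     var strict = A.StrictlyUpperTriangle();  // excludes the diagonal
+     double inf = A.InfinityNorm();           // maximum absolute row sum
+     double fro = A.FrobeniusNorm();          // square root of the sum of squared entries
+
+     var B = A + A;                           // the operator allocates a new matrix
+     var C = A.PointwiseMultiply(B);          // element-wise product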
+ + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + A vector with sparse storage, intended for very large vectors where most of the cells are zero. + + The sparse vector is not thread safe. + + + + Gets the number of non zero elements in the vector. + + The number of non zero elements. + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new sparse vector with the given length. + All cells of the vector will be initialized to zero. + Zero-length vectors are not supported. + + If length is less than one. + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. 
+ A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + Warning, the new 'sparse vector' with a non-zero scalar added to it will be a 100% filled + sparse vector and very inefficient. Would be better to work with a dense vector instead. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Negates vector and saves result to + + Target vector + + + + Conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Multiplies a vector with a complex. + + The vector to scale. + The complex value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a complex. + + The complex value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a vector with a complex. + + The vector to divide. + The complex value. + The result of the division. + If is . + + + + Computes the modulus of each element of the vector of the given divisor. + + The vector whose elements we want to compute the modulus of. + The divisor to use, + The result of the calculation + If is . + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. 
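+ A short sketch covering the sparse-vector creation, dot-product and norm entries documented
+ above, again using the double-typed vector for brevity (SparseOfIndexed and Add are assumptions
+ from the MathNet.Numerics 3.x API):
+
+     using System;
+     using MathNet.Numerics.LinearAlgebra;
+
+     // Sparse vectors from (index, value) pairs; omitted indices are zero.
+     var v = Vector<double>.Build.SparseOfIndexed(10, new[]
+     {
+         Tuple.Create(1, 2.0),
+         Tuple.Create(7, -3.0)
+     });
+     var w = Vector<double>.Build.SparseOfIndexed(10, new[] { Tuple.Create(7, 5.0) });
+
+     double dot = v.DotProduct(w);   // sum of v[i] * w[i]
+     double l1  = v.L1Norm();        // sum of absolute values
+
+     // Adding a non-zero scalar fills every entry, as the warning above notes;
+     // prefer a dense vector if that is the intended result.
+     var filled = v.Add(1.0);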
+ + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = ( ∑|this[i]|^p )^(1/p) + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Creates a double sparse vector based on a string. The string can be in the following formats (without the + quotes): 'n', 'n;n;..', '(n;n;..)', '[n;n;...]', where n is a Complex32. + + + A double sparse vector containing the values specified by the given string. + + + the string to parse. + + + An that supplies culture-specific formatting information. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Converts the string representation of a complex sparse vector to double-precision sparse vector equivalent. + A return value indicates whether the conversion succeeded or failed. + + + A string containing a complex vector to convert. + + + An that supplies culture-specific formatting information about value. + + + The parsed value. + + + If the conversion succeeds, the result will contain a complex number equivalent to value. + Otherwise the result will be null. + + + + + Complex32 version of the class. + + + + + Initializes a new instance of the Vector class. + + + + + Set all values whose absolute value is smaller than the threshold to zero. + + + + + Conjugates vector and save result to + + Target vector + + + + Negates vector and saves result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to add. + + + The vector to store the result of the addition. + + + + + Adds another vector to this vector and stores the result into the result vector. + + + The vector to add to this one. + + + The vector to store the result of the addition. + + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + + The scalar to subtract. + + + The vector to store the result of the subtraction. + + + + + Subtracts another vector to this vector and stores the result into the result vector. + + + The vector to subtract from this one. + + + The vector to store the result of the subtraction. + + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + + The scalar to multiply. + + + The vector to store the result of the multiplication. + + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + + The scalar to divide with. + + + The vector to store the result of the division. + + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. 
+ + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The vector to pointwise divide this one by. + The vector to store the result of the pointwise division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + + The p value. + + + Scalar ret = ( ∑|At(i)|^p )^(1/p) + + + + + Returns the index of the maximum element. 
+ + The index of maximum element. + + + + Returns the index of the minimum element. + + The index of minimum element. + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + + The p value. + + + This vector normalized to a unit vector with respect to the p-norm. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. 
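+ A brief sketch of the dense builder entries above, using the generic builder on the double type
+ (the Matrix<double>.Build entry point and method names are assumptions from the
+ MathNet.Numerics 3.x API):
+
+     using MathNet.Numerics.LinearAlgebra;
+
+     var M = Matrix<double>.Build;
+
+     var zeros  = M.Dense(3, 4);                                // all cells initialized to zero
+     var init   = M.Dense(3, 3, (i, j) => i == j ? 2.0 : 0.0);  // values from an init function
+     var copy   = M.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
+     var eye    = M.DenseIdentity(3);                           // one-diagonal identity
+     var random = M.Random(3, 3);                               // sampled from the standard distribution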
+ + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. 
+ A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. 
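+ The sparse counterparts above follow the same builder pattern; a hedged sketch (method names
+ assumed from the MathNet.Numerics 3.x API):
+
+     using System;
+     using MathNet.Numerics.LinearAlgebra;
+
+     var M = Matrix<double>.Build;
+
+     var S = M.Sparse(1000, 1000);        // all-zero sparse matrix
+     var I = M.SparseIdentity(1000);      // one-diagonal identity
+     var T = M.SparseOfIndexed(1000, 1000, new[]
+     {
+         Tuple.Create(0, 0, 1.0),
+         Tuple.Create(999, 999, 2.0)
+     });
+     var D = M.SparseOfDiagonalArray(new[] { 1.0, 2.0, 3.0 });  // diagonal copied from the array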
+ + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Generic linear algebra type builder, for situations where a matrix or vector + must be created in a generic way. Usage of generic builders should not be + required in normal user code. + + + + + Gets the value of 0.0 for type T. + + + + + Gets the value of 1.0 for type T. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. 
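+ The vector builder described above works the same way; a hedged sketch (Vector<double>.Build,
+ Random and the Normal distribution type are assumptions from the MathNet.Numerics 3.x API):
+
+     using MathNet.Numerics.Distributions;
+     using MathNet.Numerics.LinearAlgebra;
+
+     var V = Vector<double>.Build;
+
+     var zero    = V.Dense(5);                         // all-zero vector of length 5
+     var squares = V.Dense(5, i => (double)(i * i));   // values from an init function
+     var fromArr = V.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
+     var random  = V.Random(5, new Normal(0.0, 1.0));  // sampled from the given distribution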
+ + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new matrix straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new matrix with the same kind of the provided example. + + + + + Create a new matrix with the same kind and dimensions of the provided example. + + + + + Create a new matrix with the same kind of the provided example. 
+ + + + + Create a new matrix with a type that can represent and is closest to both provided samples. + + + + + Create a new matrix with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new dense matrix with values sampled from the provided random distribution. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new dense matrix with values sampled from the standard distribution with a system random source. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the standard distribution. + + + + + Create a new positive definite dense matrix where each value is the product + of two samples from the provided random distribution. + + + + + Create a new dense matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new dense matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new dense matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to be in column-major order (column by column) and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + + Create a new dense matrix and initialize each value to the same provided value. + + + + + Create a new dense matrix and initialize each value using the provided init function. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal dense matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new dense matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable. + The enumerable is assumed to be in column-major order (column by column). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. 
+ A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix of T as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new dense matrix from a 2D array of existing matrices. + The matrices in the array are not required to be dense already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. 
+ + + + + Create a new sparse matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse matrix of T with the given number of rows and columns. + + The number of rows. + The number of columns. + + + + Create a new sparse matrix and initialize each value to the same provided value. + + + + + Create a new sparse matrix and initialize each value using the provided init function. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal sparse matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new diagonal dense identity matrix with a one-diagonal. + + + + + Create a new sparse matrix as a copy of the given other matrix. + This new matrix will be independent from the other matrix. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given two-dimensional array. + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable. + The enumerable is assumed to be in row-major order (row by row). + This new matrix will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + + Create a new sparse matrix with the given number of rows and columns as a copy of the given array. + The array is assumed to be in column-major order (column by column). + This new matrix will be independent from the provided array. + A new memory block will be allocated for storing the matrix. + + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable columns. + Each enumerable in the master enumerable specifies a column. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given column vectors. 
+ This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given enumerable of enumerable rows. + Each enumerable in the master enumerable specifies a row. + This new matrix will be independent from the enumerables. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row arrays. + This new matrix will be independent from the arrays. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix as a copy of the given row vectors. + This new matrix will be independent from the vectors. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new sparse matrix from a 2D array of existing matrices. + The matrices in the array are not required to be sparse already. + If the matrices do not align properly, they are placed on the top left + corner of their cell with the remaining fields left zero. + + + + + Create a new diagonal matrix straight from an initialized matrix storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a new diagonal matrix with the given number of rows and columns. + All cells of the matrix will be initialized to zero. + Zero-length matrices are not supported. + + + + + Create a new diagonal matrix with the given number of rows and columns directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. + + + + + Create a new square diagonal matrix directly binding to a raw array. + The array is assumed to represent the diagonal values and is used directly without copying. + Very efficient, but changes to the array and the matrix will affect each other. 
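> Editor's note (not part of the generated documentation): a short sketch of the sparse and diagonal builder overloads documented in this region, assuming the MathNet.Numerics 3.x `Matrix<T>.Build` API; the values are arbitrary examples.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

static class SparseAndDiagonalBuilderExample
{
    static void Run()
    {
        var M = Matrix<double>.Build;

        // Empty sparse matrix, then one filled from (row, column, value) triples.
        var empty = M.Sparse(1000, 1000);
        var banded = M.SparseOfIndexed(3, 3, new[]
        {
            Tuple.Create(0, 0, 2.0), Tuple.Create(0, 1, -1.0),
            Tuple.Create(1, 1, 2.0), Tuple.Create(2, 2, 2.0)
        });

        // Diagonal storage: the raw-array overload binds directly (no copy),
        // so changes to the array and the matrix affect each other.
        double[] d = { 1.0, 2.0, 3.0 };
        var bound  = M.Diagonal(d);                  // shares d
        var copied = M.DiagonalOfDiagonalArray(d);   // independent copy
    }
}
```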
+ + + + + Create a new diagonal matrix and initialize each diagonal value to the same provided value. + + + + + Create a new diagonal matrix and initialize each diagonal value using the provided init function. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal identity matrix with a one-diagonal. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given vector. + This new matrix will be independent from the vector. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new diagonal matrix with the diagonal as a copy of the given array. + This new matrix will be independent from the array. + A new memory block will be allocated for storing the matrix. + + + + + Create a new vector straight from an initialized matrix storage instance. + If you have an instance of a discrete storage type instead, use their direct methods instead. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with the same kind and dimension of the provided example. + + + + + Create a new vector with the same kind of the provided example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new vector with a type that can represent and is closest to both provided samples and the dimensions of example. + + + + + Create a new vector with a type that can represent and is closest to both provided samples. + + + + + Create a new dense vector with values sampled from the provided random distribution. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector with values sampled from the standard distribution with a system random source. + + + + + Create a new dense vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a dense vector of T with the given size. + + The size of the vector. + + + + Create a dense vector of T that is directly bound to the specified array. + + + + + Create a new dense vector and initialize each value using the provided value. + + + + + Create a new dense vector and initialize each value using the provided init function. + + + + + Create a new dense vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new dense vector as a copy of the given indexed enumerable. 
+ Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector straight from an initialized vector storage instance. + The storage is used directly without copying. + Intended for advanced scenarios where you're working directly with + storage for performance or interop reasons. + + + + + Create a sparse vector of T with the given size. + + The size of the vector. + + + + Create a new sparse vector and initialize each value using the provided value. + + + + + Create a new sparse vector and initialize each value using the provided init function. + + + + + Create a new sparse vector as a copy of the given other vector. + This new vector will be independent from the other vector. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given array. + This new vector will be independent from the array. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given enumerable. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + Create a new sparse vector as a copy of the given indexed enumerable. + Keys must be provided at most once, zero is assumed if a key is omitted. + This new vector will be independent from the enumerable. + A new memory block will be allocated for storing the vector. + + + + + A class which encapsulates the functionality of a Cholesky factorization. + For a symmetric, positive definite matrix A, the Cholesky factorization + is an lower triangular matrix L so that A = L*L'. + + + The computation of the Cholesky factorization is done at construction time. If the matrix is not symmetric + or positive definite, the constructor will throw an exception. + + Supported data types are double, single, , and . + + + + Gets the lower triangular form of the Cholesky matrix. + + + + + Gets the determinant of the matrix for which the Cholesky matrix was computed. + + + + + Gets the log determinant of the matrix for which the Cholesky matrix was computed. + + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A Cholesky factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A Cholesky factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Eigenvalues and eigenvectors of a real matrix. + + + If A is symmetric, then A = V*D*V' where the eigenvalue matrix D is + diagonal and the eigenvector matrix V is orthogonal. + I.e. A = V*D*V' and V*VT=I. + If A is not symmetric, then the eigenvalue matrix D is block diagonal + with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, + lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The + columns of V represent the eigenvectors in the sense that A*V = V*D, + i.e. A.Multiply(V) equals V.Multiply(D). The matrix V may be badly + conditioned, or even singular, so the validity of the equation + A = V*D*Inverse(V) depends upon V.Condition(). + + Supported data types are double, single, , and . 
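> Editor's note (not part of the generated documentation): the Cholesky and eigenvalue (Evd) factorizations described above are usually obtained from extension methods on the matrix itself; a minimal sketch assuming MathNet.Numerics 3.x, with an arbitrary symmetric positive definite example matrix.

```csharp
using MathNet.Numerics.LinearAlgebra;

static class CholeskyAndEvdExample
{
    static void Run()
    {
        var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
        var b = Vector<double>.Build.Dense(new double[] { 1, 2 });

        // Cholesky: A = L*L'. The factorization is computed when Cholesky() is called
        // and throws if A is not symmetric positive definite.
        var chol = A.Cholesky();
        var L = chol.Factor;       // lower triangular factor L
        var x = chol.Solve(b);     // solves A*x = b using the factorization

        // Eigenvalue decomposition: A = V*D*V' for symmetric A.
        var evd = A.Evd();
        var eigenvalues = evd.EigenValues;   // complex-valued vector of eigenvalues
        var V = evd.EigenVectors;
        var D = evd.D;                       // (block) diagonal eigenvalue matrix
    }
}
```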
+ + + + Gets or sets a value indicating whether matrix is symmetric or not + + + + + Gets the absolute value of determinant of the square matrix for which the EVD was computed. + + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Gets or sets the eigen values (λ) of matrix in ascending value. + + + + + Gets or sets eigenvectors. + + + + + Gets or sets the block diagonal eigenvalue matrix. + + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A EVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A EVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the QR decomposition Modified Gram-Schmidt Orthogonalization. + Any real square matrix A may be decomposed as A = QR where Q is an orthogonal mxn matrix and R is an nxn upper triangular matrix. + + + The computation of the QR decomposition is done at construction time by modified Gram-Schmidt Orthogonalization. + + Supported data types are double, single, , and . + + + + Classes that solves a system of linear equations, AX = B. + + Supported data types are double, single, , and . + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, AX = B. + + The right hand side Matrix, B. + The left hand side Matrix, X. + + + + Solves a system of linear equations, Ax = b + + The right hand side vector, b. + The left hand side Vector, x. + + + + Solves a system of linear equations, Ax = b. + + The right hand side vector, b. + The left hand side Matrix>, x. + + + + A class which encapsulates the functionality of an LU factorization. + For a matrix A, the LU factorization is a pair of lower triangular matrix L and + upper triangular matrix U so that A = L*U. + In the Math.Net implementation we also store a set of pivot elements for increased + numerical stability. The pivot elements encode a permutation matrix P such that P*A = L*U. + + + The computation of the LU factorization is done at construction time. + + Supported data types are double, single, , and . + + + + Gets the lower triangular factor. + + + + + Gets the upper triangular factor. + + + + + Gets the permutation applied to LU factorization. + + + + + Gets the determinant of the matrix for which the LU factorization was computed. + + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A LU factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A LU factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Returns the inverse of this matrix. The inverse is calculated using LU decomposition. + + The inverse of this matrix. 
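> Editor's note (not part of the generated documentation): a small sketch of the LU factorization members documented above, assuming MathNet.Numerics 3.x; the matrix and right-hand side are arbitrary.

```csharp
using MathNet.Numerics.LinearAlgebra;

static class LuExample
{
    static void Run()
    {
        var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 2, 1 }, { 4, 5 } });
        var b = Vector<double>.Build.Dense(new double[] { 3, 6 });

        // LU with partial pivoting: P*A = L*U.
        var lu = A.LU();
        var L = lu.L;               // lower triangular factor
        var U = lu.U;               // upper triangular factor
        var P = lu.P;               // pivot permutation
        var det = lu.Determinant;   // determinant obtained from the factors
        var x = lu.Solve(b);        // solves A*x = b
        var inv = lu.Inverse();     // inverse computed via the LU factors
    }
}
```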
+ + + + The type of QR factorization go perform. + + + + + Compute the full QR factorization of a matrix. + + + + + Compute the thin QR factorization of a matrix. + + + + + A class which encapsulates the functionality of the QR decomposition. + Any real square matrix A (m x n) may be decomposed as A = QR where Q is an orthogonal matrix + (its columns are orthogonal unit vectors meaning QTQ = I) and R is an upper triangular matrix + (also called right triangular matrix). + + + The computation of the QR decomposition is done at construction time by Householder transformation. + If a factorization is performed, the resulting Q matrix is an m x m matrix + and the R matrix is an m x n matrix. If a factorization is performed, the + resulting Q matrix is an m x n matrix and the R matrix is an n x n matrix. + + Supported data types are double, single, , and . + + + + Gets or sets orthogonal Q matrix + + + + + Gets the upper triangular factor R. + + + + + Gets the absolute determinant value of the matrix for which the QR matrix was computed. + + + + + Gets a value indicating whether the matrix is full rank or not. + + true if the matrix is full rank; otherwise false. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + A class which encapsulates the functionality of the singular value decomposition (SVD). + Suppose M is an m-by-n matrix whose entries are real numbers. + Then there exists a factorization of the form M = UΣVT where: + - U is an m-by-m unitary matrix; + - Σ is m-by-n diagonal matrix with nonnegative real numbers on the diagonal; + - VT denotes transpose of V, an n-by-n unitary matrix; + Such a factorization is called a singular-value decomposition of M. A common convention is to order the diagonal + entries Σ(i,i) in descending order. In this case, the diagonal matrix Σ is uniquely determined + by M (though the matrices U and V are not). The diagonal entries of Σ are known as the singular values of M. + + + The computation of the singular value decomposition is done at construction time. + + Supported data types are double, single, , and . + + + Indicating whether U and VT matrices have been computed during SVD factorization. + + + + Gets the singular values (Σ) of matrix in ascending value. + + + + + Gets the left singular vectors (U - m-by-m unitary matrix) + + + + + Gets the transpose right singular vectors (transpose of V, an n-by-n unitary matrix) + + + + + Returns the singular values as a diagonal . + + The singular values as a diagonal . + + + + Gets the effective numerical matrix rank. + + The number of non-negligible singular values. + + + + Gets the two norm of the . + + The 2-norm of the . + + + + Gets the condition number max(S) / min(S) + + The condition number. + + + + Gets the determinant of the square matrix for which the SVD was computed. + + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A SVD factorized. + + The right hand side , B. 
+ The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, Ax = b, with A SVD factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + Supported data types are double, single, , and . + + Defines the base class for Matrix classes. + + + Defines the base class for Matrix classes. + + + + + The value of 1.0. + + + + + The value of 0.0. + + + + + Negate each element of this matrix and place the results into the result matrix. + + The result of the negation. + + + + Complex conjugates each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + + + + Add a scalar to each element of the matrix and stores the result in the result vector. + + The scalar to add. + The matrix to store the result of the addition. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar denominator to use. + The matrix to store the result of the division. 
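> Editor's note (not part of the generated documentation): the QR and SVD factorizations documented above follow the same Solve pattern as Cholesky and LU; a hedged sketch assuming MathNet.Numerics 3.x, using a small least-squares system as the example.

```csharp
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.LinearAlgebra.Factorization;

static class QrAndSvdExample
{
    static void Run()
    {
        // Overdetermined system: A is 3x2, b has 3 entries.
        var A = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 1 }, { 1, 2 }, { 1, 3 } });
        var b = Vector<double>.Build.Dense(new double[] { 1, 2, 2 });

        // Thin QR (Q is 3x2, R is 2x2); Solve returns the least-squares solution.
        var qr = A.QR(QRMethod.Thin);
        var xQr = qr.Solve(b);

        // SVD: A = U*S*VT. Passing true also computes the U and VT vector matrices,
        // which Solve requires.
        var svd = A.Svd(true);
        var singularValues = svd.S;
        var rank = svd.Rank;
        var cond = svd.ConditionNumber;
        var xSvd = svd.Solve(b);
    }
}
```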
+ + + + Divides a scalar by each element of the matrix and stores the result in the result matrix. + + The scalar numerator to use. + The matrix to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given divisor each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the matrix. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise raise this matrix to an exponent matrix and store the result into the result matrix. + + The exponent matrix to raise this matrix values to. + The matrix to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix with another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result matrix. + + The matrix to store the result. + + + + Adds a scalar to each element of the matrix. + + The scalar to add. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds a scalar to each element of the matrix and stores the result in the result matrix. + + The scalar to add. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The result of the addition. + If the two matrices don't have the same dimensions. + + + + Adds another matrix to this matrix. + + The matrix to add to this matrix. + The matrix to store the result of the addition. + If the two matrices don't have the same dimensions. + + + + Subtracts a scalar from each element of the matrix. + + The scalar to subtract. 
+ A new matrix containing the subtraction of this matrix and the scalar. + + + + Subtracts a scalar from each element of the matrix and stores the result in the result matrix. + + The scalar to subtract. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts each element of the matrix from a scalar. + + The scalar to subtract from. + A new matrix containing the subtraction of the scalar and this matrix. + + + + Subtracts each element of the matrix from a scalar and stores the result in the result matrix. + + The scalar to subtract from. + The matrix to store the result of the subtraction. + If this matrix and are not the same size. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Subtracts another matrix from this matrix. + + The matrix to subtract. + The matrix to store the result of the subtraction. + If the two matrices don't have the same dimensions. + + + + Multiplies each element of this matrix with a scalar. + + The scalar to multiply with. + The result of the multiplication. + + + + Multiplies each element of the matrix by a scalar and places results into the result matrix. + + The scalar to multiply the matrix with. + The matrix to store the result of the multiplication. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides each element of this matrix with a scalar. + + The scalar to divide with. + The result of the division. + + + + Divides each element of the matrix by a scalar and places results into the result matrix. + + The scalar to divide the matrix with. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Divides a scalar by each element of the matrix. + + The scalar to divide. + The result of the division. + + + + Divides a scalar by each element of the matrix and places results into the result matrix. + + The scalar to divide. + The matrix to store the result of the division. + If the result matrix's dimensions are not the same as this matrix. + + + + Multiplies this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.ColumnCount != rightSide.Count. + + + + Multiplies this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.RowCount. + If this.ColumnCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ). + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Left multiply a matrix with a vector ( = vector * matrix ) and place the result in the result vector. + + The vector to multiply with. + The result of the multiplication. + + + + Multiplies this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.Rows. + If the result matrix's dimensions are not the this.Rows x other.Columns. + + + + Multiplies this matrix with another matrix and returns the result. 
+ + The matrix to multiply with. + If this.Columns != other.Rows. + The result of the multiplication. + + + + Multiplies this matrix with transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Multiplies this matrix with the conjugate transpose of another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Columns != other.ColumnCount. + If the result matrix's dimensions are not the this.RowCount x other.RowCount. + + + + Multiplies this matrix with the conjugate transpose of another matrix and returns the result. + + The matrix to multiply with. + If this.Columns != other.ColumnCount. + The result of the multiplication. + + + + Multiplies the conjugate transpose of this matrix by a vector and returns the result. + + The vector to multiply with. + The result of the multiplication. + If this.RowCount != rightSide.Count. + + + + Multiplies the conjugate transpose of this matrix with a vector and places the results into the result vector. + + The vector to multiply with. + The result of the multiplication. + If result.Count != this.ColumnCount. + If this.RowCount != .Count. + + + + Multiplies the conjugate transpose of this matrix with another matrix and places the results into the result matrix. + + The matrix to multiply with. + The result of the multiplication. + If this.Rows != other.RowCount. + If the result matrix's dimensions are not the this.ColumnCount x other.ColumnCount. + + + + Multiplies the conjugate transpose of this matrix with another matrix and returns the result. + + The matrix to multiply with. + If this.Rows != other.RowCount. + The result of the multiplication. + + + + Raises this square matrix to a positive integer exponent and places the results into the result matrix. + + The positive integer exponent to raise the matrix to. + The result of the power. + + + + Multiplies this square matrix with another matrix and returns the result. + + The positive integer exponent to raise the matrix to. + + + + Negate each element of this matrix. + + A matrix containing the negated values. + + + + Negate each element of this matrix and place the results into the result matrix. 
+ + The result of the negation. + if the result matrix's dimensions are not the same as this matrix. + + + + Complex conjugate each element of this matrix. + + A matrix containing the conjugated values. + + + + Complex conjugate each element of this matrix and place the results into the result matrix. + + The result of the conjugation. + if the result matrix's dimensions are not the same as this matrix. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + A matrix containing the results. + + + + Computes the remainder (matrix % divisor), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar denominator to use. + Matrix to store the results in. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + A matrix containing the results. + + + + Computes the remainder (dividend % matrix), where the result has the sign of the dividend, + for each element of the matrix. + + The scalar numerator to use. + Matrix to store the results in. + + + + Pointwise multiplies this matrix with another matrix. + + The matrix to pointwise multiply with this one. + If this matrix and are not the same size. + A new matrix that is the pointwise multiplication of this matrix and . + + + + Pointwise multiplies this matrix with another matrix and stores the result into the result matrix. + + The matrix to pointwise multiply with this one. + The matrix to store the result of the pointwise multiplication. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise divide this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + A new matrix that is the pointwise division of this matrix and . + + + + Pointwise divide this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise division. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. + + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise raise this matrix to an exponent and store the result into the result matrix. + + The exponent to raise this matrix values to. + + + + Pointwise raise this matrix to an exponent. 
+ + The exponent to raise this matrix values to. + The matrix to store the result into. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise modulus. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix. + + The pointwise denominator matrix to use. + If this matrix and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this matrix by another matrix and stores the result into the result matrix. + + The pointwise denominator matrix to use. + The matrix to store the result of the pointwise remainder. + If this matrix and are not the same size. + If this matrix and are not the same size. + + + + Helper function to apply a unary function to a matrix. The function + f modifies the matrix given to it in place. Before its + called, a copy of the 'this' matrix is first created, then passed to + f. The copy is then returned as the result + + Function which takes a matrix, modifies it in place and returns void + New instance of matrix which is the result + + + + Helper function to apply a unary function which modifies a matrix + in place. + + Function which takes a matrix, modifies it in place and returns void + The matrix to be passed to f and where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two matrices + and modifies the latter in place. A copy of the "this" matrix is + first made and then passed to f together with the other matrix. The + copy is then returned as the result + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this amtrix and are not the same dimension. + + + + Helper function to apply a binary function which takes two matrices + and modifies the second one in place + + Function which takes two matrices, modifies the second in place and returns void + The other matrix to be passed to the function as argument. It is not modified + The resulting matrix + If this matrix and are not the same dimension. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The matrix to store the result. + If this matrix and are not the same size. 
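> Editor's note (not part of the generated documentation): the arithmetic and pointwise members documented above all have allocating variants (returning a new matrix) and result-parameter variants (writing into an existing matrix). A brief sketch of the allocating forms, assuming MathNet.Numerics 3.x.

```csharp
using System;
using MathNet.Numerics.LinearAlgebra;

static class ArithmeticExample
{
    static void Run()
    {
        var A = Matrix<double>.Build.Dense(2, 2, (i, j) => i + j + 1.0);
        var B = Matrix<double>.Build.DenseIdentity(2);

        // Whole-matrix arithmetic.
        var sum = A.Add(B);             // equivalent to A + B
        var product = A.Multiply(B);    // equivalent to A * B
        var scaled = A.Multiply(0.5);   // scalar multiply

        // Element-wise (pointwise) operations.
        var hadamard = A.PointwiseMultiply(B);
        var squared = A.PointwisePower(2.0);
        var logs = A.PointwiseLog();

        // Map applies an arbitrary function to every element.
        var clipped = A.Map(v => Math.Min(v, 2.0));
    }
}
```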
+ + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + matrix and a given other matrix being the 'x' of atan2 and the + 'this' matrix being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the trace of this matrix. + + The trace of this matrix + If the matrix is not square + + + + Calculates the rank of the matrix. + + effective numerical rank, obtained from SVD + + + + Calculates the nullity of the matrix. + + effective numerical nullity, obtained from SVD + + + Calculates the condition number of this matrix. + The condition number of the matrix. + The condition number is calculated using singular value decomposition. + + + Computes the determinant of this matrix. + The determinant of this matrix. + + + + Computes an orthonormal basis for the null space of this matrix, + also known as the kernel of the corresponding matrix transformation. 
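> Editor's note (not part of the generated documentation): a sketch of the matrix-property members documented above (trace, rank, nullity, determinant, condition number, kernel), assuming MathNet.Numerics 3.x; the example matrix is deliberately rank-deficient.

```csharp
using MathNet.Numerics.LinearAlgebra;

static class MatrixPropertiesExample
{
    static void Run()
    {
        // Rank-deficient 3x3 matrix: the third row is the sum of the first two.
        var A = Matrix<double>.Build.DenseOfRowArrays(
            new[] { 1.0, 2.0, 3.0 },
            new[] { 4.0, 5.0, 6.0 },
            new[] { 5.0, 7.0, 9.0 });

        var trace = A.Trace();            // sum of the diagonal entries
        var rank = A.Rank();              // effective numerical rank, via SVD
        var nullity = A.Nullity();        // dimension of the null space, via SVD
        var det = A.Determinant();        // approximately zero for this singular matrix
        var cond = A.ConditionNumber();   // large for near-singular matrices
        var kernel = A.Kernel();          // orthonormal basis vectors of the null space
    }
}
```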
+ + + + + Computes an orthonormal basis for the column space of this matrix, + also known as the range or image of the corresponding matrix transformation. + + + + Computes the inverse of this matrix. + The inverse of this matrix. + + + Computes the Moore-Penrose Pseudo-Inverse of this matrix. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + + + + Computes the Kronecker product of this matrix with the given matrix. The new matrix is M-by-N + with M = this.Rows * lower.Rows and N = this.Columns * lower.Columns. + + The other matrix. + The Kronecker product of the two matrices. + If the result matrix's dimensions are not (this.Rows * lower.rows) x (this.Columns * lower.Columns). + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another matrix to each value. + + The matrix with the values to compare to. + The matrix to store the result. + If this matrix and are not the same size. + + + Calculates the induced L1 norm of this matrix. 
+ The maximum absolute column sum of the matrix. + + + Calculates the induced L2 norm of the matrix. + The largest singular value of the matrix. + + For sparse matrices, the L2 norm is computed using a dense implementation of singular value decomposition. + In a later release, it will be replaced with a sparse implementation. + + + + Calculates the induced infinity norm of this matrix. + The maximum absolute row sum of the matrix. + + + Calculates the entry-wise Frobenius norm of this matrix. + The square root of the sum of the squared values. + + + + Calculates the p-norms of all row vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the p-norms of all column vectors. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all row vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Normalizes all column vectors to a unit p-norm. + Typical values for p are 1.0 (L1, Manhattan norm), 2.0 (L2, Euclidean norm) and positive infinity (infinity norm) + + + + + Calculates the value sum of each row vector. + + + + + Calculates the value sum of each column vector. + + + + + Calculates the absolute value sum of each row vector. + + + + + Calculates the absolute value sum of each column vector. + + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns a string that describes the type, dimensions and shape of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string 2D array that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes the content of this matrix. + + + + + Returns a string that summarizes this matrix. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this matrix. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Matrix class. + + + + + Gets the raw matrix data storage. + + + + + Gets the number of columns. + + The number of columns. + + + + Gets the number of rows. + + The number of rows. + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + + + + Sets the value of the given element without range checking. + + + The row of the element. + + + The column of the element. 
+ + + The value to set the element to. + + + + + Sets all values to zero. + + + + + Sets all values of a row to zero. + + + + + Sets all values of a column to zero. + + + + + Sets all values for all of the chosen rows to zero. + + + + + Sets all values for all of the chosen columns to zero. + + + + + Sets all values of a sub-matrix to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Creates a clone of this instance. + + + A clone of the instance. + + + + + Copies the elements of this matrix to the given matrix. + + + The matrix to copy values into. + + + If target is . + + + If this and the target matrix do not have the same dimensions.. + + + + + Copies a row into an Vector. + + The row to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of rows. + + + + Copies a row into to the given Vector. + + The row to copy. + The Vector to copy the row into. + If the result vector is . + If is negative, + or greater than or equal to the number of rows. + If this.Columns != result.Count. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of rows. + is negative, + or greater than or equal to the number of columns. + (columnIndex + length) >= Columns. + If is not positive. + + + + Copies the requested row elements into a new Vector. + + The row to copy elements from. + The column to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Copies a column into a new Vector>. + + The column to copy. + A Vector containing the copied elements. + If is negative, + or greater than or equal to the number of columns. + + + + Copies a column into to the given Vector. + + The column to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If this.Rows != result.Count. + + + + Copies the requested column elements into a new Vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + A Vector containing the requested elements. + If: + is negative, + or greater than or equal to the number of columns. + is negative, + or greater than or equal to the number of rows. + (rowIndex + length) >= Rows. + + If is not positive. + + + + Copies the requested column elements into the given vector. + + The column to copy elements from. + The row to start copying from. + The number of elements to copy. + The Vector to copy the column into. + If the result Vector is . + If is negative, + or greater than or equal to the number of columns. + If is negative, + or greater than or equal to the number of rows. + If + + is greater than or equal to the number of rows. + If is not positive. + If result.Count < length. + + + + Returns a new matrix containing the upper triangle of this matrix. + + The upper triangle of this matrix. 
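> Editor's note (not part of the generated documentation): a short sketch of the row, column, sub-matrix and triangle accessors documented above, assuming MathNet.Numerics 3.x. All of these return independent copies rather than views.

```csharp
using MathNet.Numerics.LinearAlgebra;

static class ExtractionExample
{
    static void Run()
    {
        var A = Matrix<double>.Build.Dense(4, 4, (i, j) => 10 * i + j);

        var row0 = A.Row(0);               // copy of the first row as a vector
        var col2 = A.Column(2);            // copy of the third column
        var sub = A.SubMatrix(1, 2, 1, 2); // 2x2 block starting at row 1, column 1
        var diag = A.Diagonal();           // Min(Rows, Columns) diagonal entries
        var upper = A.UpperTriangle();     // copy with entries below the diagonal zeroed
    }
}
```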
+ + + + Returns a new matrix containing the lower triangle of this matrix. + + The lower triangle of this matrix. + + + + Puts the lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Puts the upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a matrix that contains the values from the requested sub-matrix. + + The row to start copying from. + The number of rows to copy. Must be positive. + The column to start copying from. + The number of columns to copy. Must be positive. + The requested sub-matrix. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + If or + is not positive. + + + + Returns the elements of the diagonal in a Vector. + + The elements of the diagonal. + For non-square matrices, the method returns Min(Rows, Columns) elements where + i == j (i is the row index, and j is the column index). + + + + Returns a new matrix containing the lower triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The lower triangle of this matrix. + + + + Puts the strictly lower triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Returns a new matrix containing the upper triangle of this matrix. The new matrix + does not contain the diagonal elements of this matrix. + + The upper triangle of this matrix. + + + + Puts the strictly upper triangle of this matrix into the result matrix. + + Where to store the lower triangle. + If is . + If the result matrix's dimensions are not the same as this matrix. + + + + Creates a new matrix and inserts the given column at the given index. + + The index of where to insert the column. + The column to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of columns. + If the size of != the number of rows. + + + + Creates a new matrix with the given column removed. + + The index of the column to remove. + A new matrix without the chosen column. + If is < zero or >= the number of columns. + + + + Copies the values of the given Vector to the specified column. + + The column to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given Vector to the specified sub-column. + + The column to copy the values to. + The row to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + + + + Copies the values of the given array to the specified column. + + The column to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of columns. + If the size of does not + equal the number of rows of this Matrix. + If the size of does not + equal the number of rows of this Matrix. 
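> Editor's note (not part of the generated documentation): the insert/remove members documented here return new matrices, while the Set* members write into the existing matrix; a sketch assuming MathNet.Numerics 3.x.

```csharp
using MathNet.Numerics.LinearAlgebra;

static class RowColumnEditExample
{
    static void Run()
    {
        var A = Matrix<double>.Build.DenseIdentity(3);
        var v = Vector<double>.Build.Dense(3, 2.0);

        // Insert/remove return new matrices; A itself is unchanged.
        var widened = A.InsertColumn(1, v);       // 3x4 with v as column 1
        var narrowed = widened.RemoveColumn(3);   // back to 3x3

        // Setters mutate in place and require matching lengths.
        A.SetColumn(0, v);
        A.SetRow(2, new[] { 7.0, 8.0, 9.0 });
    }
}
```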
+ + + + Creates a new matrix and inserts the given row at the given index. + + The index of where to insert the row. + The row to insert. + A new matrix with the inserted column. + If is . + If is < zero or > the number of rows. + If the size of != the number of columns. + + + + Creates a new matrix with the given row removed. + + The index of the row to remove. + A new matrix without the chosen row. + If is < zero or >= the number of rows. + + + + Copies the values of the given Vector to the specified row. + + The row to copy the values to. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given Vector to the specified sub-row. + + The row to copy the values to. + The column to start copying to. + The number of elements to copy. + The vector to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of the given array to the specified row. + + The row to copy the values to. + The array to copy the values from. + If is . + If is less than zero, + or greater than or equal to the number of rows. + If the size of does not + equal the number of columns of this Matrix. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The column to start copying to. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The number of rows to copy. Must be positive. + The column to start copying to. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of a given matrix into a region in this matrix. + + The row to start copying to. + The row of the sub-matrix to start copying from. + The number of rows to copy. Must be positive. + The column to start copying to. + The column of the sub-matrix to start copying from. + The number of columns to copy. Must be positive. + The sub-matrix to copy from. + If: is + negative, or greater than or equal to the number of rows. + is negative, or greater than or equal to the number + of columns. + (columnIndex + columnLength) >= Columns + (rowIndex + rowLength) >= Rows + the size of is not at least x . + If or + is not positive. + + + + Copies the values of the given Vector to the diagonal. + + The vector to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). + For non-square matrices, the elements of are copied to + this[i,i]. + + + + Copies the values of the given array to the diagonal. + + The array to copy the values from. The length of the vector should be + Min(Rows, Columns). + If is . + If the length of does not + equal Min(Rows, Columns). 
+ For non-square matrices, the elements of are copied to + this[i,i]. + + + + Returns the transpose of this matrix. + + The transpose of this matrix. + + + + Puts the transpose of this matrix into the result matrix. + + + + + Returns the conjugate transpose of this matrix. + + The conjugate transpose of this matrix. + + + + Puts the conjugate transpose of this matrix into the result matrix. + + + + + Permute the rows of a matrix according to a permutation. + + The row permutation to apply to this matrix. + + + + Permute the columns of a matrix according to a permutation. + + The column permutation to apply to this matrix. + + + + Concatenates this matrix with the given matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Concatenates this matrix with the given matrix and places the result into the result matrix. + + The matrix to concatenate. + The combined matrix. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Stacks this matrix on top of the given matrix and places the result into the result matrix. + + The matrix to stack this matrix upon. + The combined matrix. + If lower is . + If upper.Columns != lower.Columns. + + + + + + Diagonally stacks his matrix on top of the given matrix. The new matrix is a M-by-N matrix, + where M = this.Rows + lower.Rows and N = this.Columns + lower.Columns. + The values of off the off diagonal matrices/blocks are set to zero. + + The lower, right matrix. + If lower is . + the combined matrix + + + + + + Diagonally stacks his matrix on top of the given matrix and places the combined matrix into the result matrix. + + The lower, right matrix. + The combined matrix + If lower is . + If the result matrix is . + If the result matrix's dimensions are not (this.Rows + lower.rows) x (this.Columns + lower.Columns). + + + + + + Evaluates whether this matrix is symmetric. + + + + + Evaluates whether this matrix is hermitian (conjugate symmetric). + + + + + Evaluates whether this matrix is conjugate symmetric. + + + + + Returns this matrix as a multidimensional array. + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + + A multidimensional containing the values of this matrix. + + + + Returns the matrix's elements as an array with the data laid out column by column (column major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the matrix's elements as an array with the data laid row by row (row major). + The returned array will be independent from this matrix. + A new memory block will be allocated for the array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
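A sketch of the copying export members described above, under the same assumptions; the comments repeat the layouts shown in the documentation:

    var m = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 1.0, 2.0, 3.0 },
        { 4.0, 5.0, 6.0 },
        { 7.0, 8.0, 9.0 }
    });

    double[]   colMajor = m.ToColumnMajorArray(); // 1, 4, 7, 2, 5, 8, 3, 6, 9
    double[]   rowMajor = m.ToRowMajorArray();    // 1, 2, 3, 4, 5, 6, 7, 8, 9
    double[][] rows     = m.ToRowArrays();        // independent jagged copies, row by row
    double[,]  grid     = m.ToArray();            // independent 2-D copy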
+ + + Returns this matrix as array of row arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns this matrix as array of column arrays. + The returned arrays will be independent from this matrix. + A new memory block will be allocated for the arrays. + + + + + Returns the internal multidimensional array of this matrix if, and only if, this matrix is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the matrix will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Returns the internal column by column (column major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 4, 7, 2, 5, 8, 3, 6, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
+ + + Returns the internal row by row (row major) array of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowMajorArray instead if you always need an independent array. + +
+            1, 2, 3
+            4, 5, 6  will be returned as  1, 2, 3, 4, 5, 6, 7, 8, 9
+            7, 8, 9
+            
+ An array containing the matrix's elements. + + +
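A sketch of the non-copying accessors described above. Dense matrices in Math.NET Numerics store their data column by column, so the expectation here (an assumption worth verifying against the referenced build) is that AsColumnMajorArray returns the live internal array while AsRowMajorArray returns null for them:

    var dense = Matrix<double>.Build.Dense(2, 2);
    double[] live = dense.AsColumnMajorArray();  // assumed: shares storage with the matrix
    double[] none = dense.AsRowMajorArray();     // assumed: null, data is not stored row-major
    if (live != null)
    {
        live[0] = 5.0;                           // also changes dense[0, 0]
    }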
+ + + Returns the internal row arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToRowArrays instead if you always need an independent array. + + + + + Returns the internal column arrays of this matrix if, and only if, this matrix is stored by such arrays internally. + Otherwise returns null. Changes to the returned arrays and the matrix will affect each other. + Use ToColumnArrays instead if you always need an independent array. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix. + + + The enumerator will include all values, even if they are zero. + The ordering of the values is unspecified (not necessarily column-wise or row-wise). + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the matrix and their index. + + + The enumerator returns a Tuple with the first two values being the row and column index + and the third value being the value of the element at that index. + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix. + + The column to start enumerating over. + The number of columns to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all columns of the matrix and their index. + + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all columns of the matrix and their index. + + The column to start enumerating over. + The number of columns to enumerating over. + + The enumerator returns a Tuple with the first value being the column index + and the second value being the value of the column at that index. + + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix. + + The row to start enumerating over. + The number of rows to enumerating over. + + + + Returns an IEnumerable that can be used to iterate through all rows of the matrix and their index. 
+ + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Returns an IEnumerable that can be used to iterate through a subset of all rows of the matrix and their index. + + The row to start enumerating over. + The number of rows to enumerating over. + + The enumerator returns a Tuple with the first value being the row index + and the second value being the value of the row at that index. + + + + + Applies a function to each value of this matrix and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value with its result. + The row and column indices of each value (zero-based) are passed as first arguments to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and replaces the value in the result matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + Applies a function to each value of this matrix and returns the results as a new matrix. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse matrices). + + + + + For each row, applies a function f to each element of the row, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each row. + + + + + For each column, applies a function f to each element of the column, threading an accumulator argument through the computation. + Returns an array with the resulting accumulator states for each column. + + + + + Applies a function f to each row vector, threading an accumulator vector argument through the computation. 
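A sketch of the enumeration and map members described above, under the same assumptions:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    var m = Matrix<double>.Build.Dense(3, 3, (i, j) => i + j);
    foreach (var cell in m.EnumerateIndexed())   // tuples of (row, column, value)
    {
        Console.WriteLine("[{0},{1}] = {2}", cell.Item1, cell.Item2, cell.Item3);
    }
    m.MapInplace(x => x * 2.0);                  // replace each value in place
    var shifted = m.Map(x => x + 1.0);           // same idea, but into a new matrix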
+ Returns the resulting accumulator vector. + + + + + Applies a function f to each column vector, threading an accumulator vector argument through the computation. + Returns the resulting accumulator vector. + + + + + Reduces all row vectors by applying a function between two of them, until only a single vector is left. + + + + + Reduces all column vectors by applying a function between two of them, until only a single vector is left. + + + + + Applies a function to each value pair of two matrices and replaces the value in the result vector. + + + + + Applies a function to each value pair of two matrices and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two matrices and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two matrices of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element pairs of two matrices of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two matrices of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Matrix containing the same values of . + + The matrix to get the values from. + A matrix containing a the same values as . + If is . + + + + Negates each element of the matrix. + + The matrix to negate. + A matrix containing the negated values. + If is . + + + + Adds two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to add. + The right matrix to add. + The result of the addition. + If and don't have the same dimensions. + If or is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The left matrix to add. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of the matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to add. + The right matrix to add. + The result of the addition. + If is . + + + + Subtracts two matrices together and returns the results. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Subtracts a scalar from each element of a matrix. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. 
+ The left matrix to subtract. + The scalar value to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Substracts each element of a matrix from a scalar. + + This operator will allocate new memory for the result. It will + choose the representation of the provided matrix. + The scalar value to subtract. + The right matrix to subtract. + The result of the subtraction. + If and don't have the same dimensions. + If or is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies a Matrix by a constant and returns the result. + + The matrix to multiply. + The constant to multiply the matrix by. + The result of the multiplication. + If is . + + + + Multiplies two matrices. + + This operator will allocate new memory for the result. It will + choose the representation of either or depending on which + is denser. + The left matrix to multiply. + The right matrix to multiply. + The result of multiplication. + If or is . + If the dimensions of or don't conform. + + + + Multiplies a Matrix and a Vector. + + The matrix to multiply. + The vector to multiply. + The result of multiplication. + If or is . + + + + Multiplies a Vector and a Matrix. + + The vector to multiply. + The matrix to multiply. + The result of multiplication. + If or is . + + + + Divides a scalar with a matrix. + + The scalar to divide. + The matrix. + The result of the division. + If is . + + + + Divides a matrix with a scalar. + + The matrix to divide. + The scalar value. + The result of the division. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of the matrix of the given divisor. + + The matrix whose elements we want to compute the modulus of. + The divisor to use. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the matrix. + + The dividend we want to compute the modulus of. + The matrix whose elements we want to use as divisor. + The result of the calculation + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two matrices. + + The matrix whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
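A sketch of the operator overloads described above, under the same assumptions:

    var A = Matrix<double>.Build.Random(3, 3);
    var B = Matrix<double>.Build.Random(3, 3);
    var v = Vector<double>.Build.Random(3);

    var sum     = A + B;        // element-wise addition
    var scaled  = 2.0 * A - B;  // scalar multiplication, then subtraction
    var product = A * B;        // matrix product (inner dimensions must agree)
    var image   = A * v;        // matrix-vector product
    var halved  = A / 2.0;      // divide every element by a scalar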
+ + + + Computes the sqrt of a matrix pointwise + + The input matrix + + + + + Computes the exponential of a matrix pointwise + + The input matrix + + + + + Computes the log of a matrix pointwise + + The input matrix + + + + + Computes the log10 of a matrix pointwise + + The input matrix + + + + + Computes the sin of a matrix pointwise + + The input matrix + + + + + Computes the cos of a matrix pointwise + + The input matrix + + + + + Computes the tan of a matrix pointwise + + The input matrix + + + + + Computes the asin of a matrix pointwise + + The input matrix + + + + + Computes the acos of a matrix pointwise + + The input matrix + + + + + Computes the atan of a matrix pointwise + + The input matrix + + + + + Computes the sinh of a matrix pointwise + + The input matrix + + + + + Computes the cosh of a matrix pointwise + + The input matrix + + + + + Computes the tanh of a matrix pointwise + + The input matrix + + + + + Computes the absolute value of a matrix pointwise + + The input matrix + + + + + Computes the floor of a matrix pointwise + + The input matrix + + + + + Computes the ceiling of a matrix pointwise + + The input matrix + + + + + Computes the rounded value of a matrix pointwise + + The input matrix + + + + + Computes the Cholesky decomposition for a matrix. + + The Cholesky decomposition object. + + + + Computes the LU decomposition for a matrix. + + The LU decomposition object. + + + + Computes the QR decomposition for a matrix. + + The type of QR factorization to perform. + The QR decomposition object. + + + + Computes the QR decomposition for a matrix using Modified Gram-Schmidt Orthogonalization. + + The QR decomposition object. + + + + Computes the SVD decomposition for a matrix. + + Compute the singular U and VT vectors or not. + The SVD decomposition object. + + + + Computes the EVD decomposition for a matrix. + + The EVD decomposition object. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, AX = B, with A QR factorized. + + The right hand side , B. + The left hand side , X. + + + + Solves a system of linear equations, Ax = b, with A QR factorized. + + The right hand side vector, b. + The left hand side , x. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. 
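A sketch of the factorizations and direct solvers described above, under the same assumptions:

    var A = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 4.0, 1.0 },
        { 1.0, 3.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

    var x   = A.Solve(b);   // lets the library pick a suitable factorization
    var lu  = A.LU();       // explicit LU decomposition
    var x2  = lu.Solve(b);  // reuse the factorization for further right-hand sides
    var qr  = A.QR();       // QR decomposition
    var svd = A.Svd(true);  // singular value decomposition with the U and VT vectors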
+ + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The result vector x. + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The result matrix X + The iterative solver to use. + Criteria to control when to stop iterating. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The preconditioner to use for approximations. + The result matrix X. + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix (this matrix), b is the solution vector and x is the unknown vector. + + The solution vector b. + The iterative solver to use. + Criteria to control when to stop iterating. + The result vector x. + + + + Solves the matrix equation AX = B, where A is the coefficient matrix (this matrix), B is the solution matrix and X is the unknown matrix. + + The solution matrix B. + The iterative solver to use. + Criteria to control when to stop iterating. + The result matrix X. + + + + Converts a matrix to single precision. + + + + + Converts a matrix to double precision. + + + + + Converts a matrix to single precision complex numbers. + + + + + Converts a matrix to double precision complex numbers. + + + + + Gets a single precision complex matrix with the real parts from the given matrix. + + + + + Gets a double precision complex matrix with the real parts from the given matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the real parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. + + + + + Gets a real matrix representing the imaginary parts of a complex matrix. 
+ + + + + Existing data may not be all zeros, so clearing may be necessary + if not all of it will be overwritten anyway. + + + + + If existing data is assumed to be all zeros already, + clearing it may be skipped if applicable. + + + + + Allow skipping zero entries (without enforcing skipping them). + When enumerating sparse matrices this can significantly speed up operations. + + + + + Force applying the operation to all fields even if they are zero. + + + + + It is not known yet whether a matrix is symmetric or not. + + + + + A matrix is symmetric + + + + + A matrix is hermitian (conjugate symmetric). + + + + + A matrix is not symmetric + + + + + Defines an that uses a cancellation token as stop criterion. + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Stop criterion that delegates the status determination to a delegate. + + + + + Create a new instance of this criterion with a custom implementation. + + Custom implementation with the same signature and semantics as the DetermineStatus method. + + + + Determines the status of the iterative calculation by delegating it to the provided delegate. + Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + + + + Clones this criterion and its settings. + + + + + Monitors an iterative calculation for signs of divergence. + + + + + The maximum relative increase the residual may experience without triggering a divergence warning. + + + + + The number of iterations over which a residual increase should be tracked before issuing a divergence warning. + + + + + The status of the calculation + + + + + The array that holds the tracking information. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified maximum + relative increase and the specified minimum number of tracking iterations. + + The maximum relative increase that the residual may experience before a divergence warning is issued. + The minimum number of iterations over which the residual must grow before a divergence warning is issued. + + + + Gets or sets the maximum relative increase that the residual may experience before a divergence warning is issued. + + Thrown if the Maximum is set to zero or below. 
+ + + + Gets or sets the minimum number of iterations over which the residual must grow before + issuing a divergence warning. + + Thrown if the value is set to less than one. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Detect if solution is diverging + + true if diverging, otherwise false + + + + Gets required history Length + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Defines an that monitors residuals for NaN's. + + + + + The status of the calculation + + + + + The iteration number of the last iteration. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + The base interface for classes that provide stop criteria for iterative calculations. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current IIterationStopCriterion. Status is set to Status field of current object. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + is not a legal value. Status should be set in implementation. + + + + Resets the IIterationStopCriterion to the pre-calculation state. + + To implementers: Invoking this method should not clear the user defined + property values, only the state that is used to track the progress of the + calculation. + + + + Defines the interface for classes that solve the matrix equation Ax = b in + an iterative manner. + + + + + Solves the matrix equation Ax = b, where A is the coefficient matrix, b is the + solution vector and x is the unknown vector. + + The coefficient matrix, A. + The solution vector, b + The result vector, x + The iterator to use to control when to stop iterating. + The preconditioner to use for approximations. + + + + Defines the interface for objects that can create an iterative solver with + specific settings. 
This interface is used to pass iterative solver creation + setup information around. + + + + + Gets the type of the solver that will be created by this setup object. + + + + + Gets type of preconditioner, if any, that will be created by this setup object. + + + + + Creates the iterative solver to be used. + + + + + Creates the preconditioner to be used by default (can be overwritten). + + + + + Gets the relative speed of the solver. + + Returns a value between 0 and 1, inclusive. + + + + Gets the relative reliability of the solver. + + Returns a value between 0 and 1 inclusive. + + + + The base interface for preconditioner classes. + + + + Preconditioners are used by iterative solvers to improve the convergence + speed of the solving process. Increase in convergence speed + is related to the number of iterations necessary to get a converged solution. + So while in general the use of a preconditioner means that the iterative + solver will perform fewer iterations it does not guarantee that the actual + solution time decreases given that some preconditioners can be expensive to + setup and run. + + + Note that in general changes to the matrix will invalidate the preconditioner + if the changes occur after creating the preconditioner. + + + + + + Initializes the preconditioner and loads the internal data structures. + + The matrix on which the preconditioner is based. + + + + Approximates the solution to the matrix equation Mx = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + + Defines an that monitors the numbers of iteration + steps as stop criterion. + + + + + The default value for the maximum number of iterations the process is allowed + to perform. + + + + + The maximum number of iterations the calculation is allowed to perform. + + + + + The status of the calculation + + + + + Initializes a new instance of the class with the default maximum + number of iterations. + + + + + Initializes a new instance of the class with the specified maximum + number of iterations. + + The maximum number of iterations the calculation is allowed to perform. + + + + Gets or sets the maximum number of iterations the calculation is allowed to perform. + + Thrown if the Maximum is set to a negative value. + + + + Returns the maximum number of iterations to the default. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Iterative Calculation Status + + + + + An iterator that is used to check if an iterative calculation should continue or stop. + + + + + The collection that holds all the stop criteria and the flag indicating if they should be added + to the child iterators. + + + + + The status of the iterator. + + + + + Initializes a new instance of the class with the default stop criteria. 
+ + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Initializes a new instance of the class with the specified stop criteria. + + + The specified stop criteria. Only one stop criterion of each type can be passed in. None + of the stop criteria will be passed on to child iterators. + + + + + Gets the current calculation status. + + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual iterators may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Indicates to the iterator that the iterative process has been cancelled. + + + Does not reset the stop-criteria. + + + + + Resets the to the pre-calculation state. + + + + + Creates a deep clone of the current iterator. + + The deep clone of the current iterator. + + + + Defines an that monitors residuals as stop criterion. + + + + + The maximum value for the residual below which the calculation is considered converged. + + + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + The status of the calculation + + + + + The number of iterations since the residuals got below the maximum. + + + + + The iteration number of the last iteration. + + + + + Initializes a new instance of the class with the specified + maximum residual and minimum number of iterations. + + + The maximum value for the residual below which the calculation is considered converged. + + + The minimum number of iterations for which the residual has to be below the maximum before + the calculation is considered converged. + + + + + Gets or sets the maximum value for the residual below which the calculation is considered + converged. + + Thrown if the Maximum is set to a negative value. + + + + Gets or sets the minimum number of iterations for which the residual has to be + below the maximum before the calculation is considered converged. + + Thrown if the BelowMaximumFor is set to a value less than 1. + + + + Determines the status of the iterative calculation based on the stop criteria stored + by the current . Result is set into Status field. + + The number of iterations that have passed so far. + The vector containing the current solution values. + The right hand side vector. + The vector containing the current residual vectors. + + The individual stop criteria may internally track the progress of the calculation based + on the invocation of this method. Therefore this method should only be called if the + calculation has moved forwards at least one step. + + + + + Gets the current calculation status. + + + + + Resets the to the pre-calculation state. + + + + + Clones the current and its settings. + + A new instance of the class. + + + + Loads the available objects from the specified assembly. + + The assembly which will be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. 
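A sketch of an iterative solve wired together from the iterator, stop-criterion and preconditioner types described above; BiCgStab and UnitPreconditioner are assumed to be the concrete types shipped in the referenced 3.x build:

    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Solvers;
    using MathNet.Numerics.LinearAlgebra.Double.Solvers;

    var A = Matrix<double>.Build.DenseOfArray(new double[,]
    {
        { 4.0, 1.0, 0.0 },
        { 1.0, 4.0, 1.0 },
        { 0.0, 1.0, 4.0 }
    });
    var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var x = Vector<double>.Build.Dense(3);   // result vector, filled in by the solver

    var iterator = new Iterator<double>(
        new IterationCountStopCriterion<double>(1000),  // hard cap on iterations
        new ResidualStopCriterion<double>(1e-10));      // stop once the residual is small enough
    var solver = new BiCgStab();
    solver.Solve(A, b, x, iterator, new UnitPreconditioner<double>());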
+ The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The type in the assembly which should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the specified assembly. + + The of the assembly that should be searched for setup objects. + If true, types that fail to load are simply ignored. Otherwise the exception is rethrown. + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + The types that should not be loaded. + + + + Loads the available objects from the Math.NET Numerics assembly. + + + + + A unit preconditioner. This preconditioner does not actually do anything + it is only used when running an without + a preconditioner. + + + + + The coefficient matrix on which this preconditioner operates. + Is used to check dimensions on the different vectors that are processed. + + + + + Initializes the preconditioner and loads the internal data structures. + + + The matrix upon which the preconditioner is based. + + If is not a square matrix. + + + + Approximates the solution to the matrix equation Ax = b. + + The right hand side vector. + The left hand side vector. Also known as the result vector. + + + If and do not have the same size. + + + - or - + + + If the size of is different the number of rows of the coefficient matrix. + + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Evaluate the row and column at a specific data index. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Gets or sets the value at the given row and column, with range checking. + + + The row of the element. + + + The column of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. 
+ + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + The state array will not be modified, unless it is the same instance as the target array (which is allowed). + + + + The array containing the row indices of the existing rows. Element "i" of the array gives the index of the + element in the array that is first non-zero element in a row "i". + The last value is equal to ValueCount, so that the number of non-zero entries in row "i" is always + given by RowPointers[i+i] - RowPointers[i]. This array thus has length RowCount+1. + + + + + An array containing the column indices of the non-zero values. Element "j" of the array + is the number of the column in matrix that contains the j-th value in the array. + + + + + Array that contains the non-zero elements of matrix. Values of the non-zero elements of matrix are mapped into the values + array using the row-major storage mapping described in a compressed sparse row (CSR) format. + + + + + Gets the number of non zero elements in the matrix. + + The number of non zero elements. + + + + True if the matrix storage format is dense. + + + + + True if all fields of this matrix can be set to any value. + False if some fields are fixed, like on a diagonal matrix. + + + + + True if the specified field can be set to any value. + False if the field is fixed, like an off-diagonal field on a diagonal matrix. + + + + + Retrieves the requested element without range checking. + + + The row of the element. + + + The column of the element. + + + The requested element. + + Not range-checked. + + + + Sets the element without range checking. + + The row of the element. + The column of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Delete value from internal storage + + Index of value in nonZeroValues array + Row number of matrix + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Find item Index in nonZeroValues array + + Matrix row index + Matrix column index + Item index + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
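A sketch of the compressed sparse row (CSR) layout described above; the cast to SparseCompressedRowMatrixStorage and its RowPointers / ColumnIndices / Values fields are assumed from the 3.x storage API:

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearAlgebra.Storage;

    var sparse = Matrix<double>.Build.Sparse(3, 3);
    sparse[0, 0] = 1.0;
    sparse[1, 2] = 2.0;
    sparse[2, 1] = 3.0;

    var csr = (SparseCompressedRowMatrixStorage<double>)sparse.Storage;
    // Row i owns the value range [RowPointers[i], RowPointers[i + 1]) within Values.
    for (int i = 0; i < csr.RowCount; i++)
    {
        for (int k = csr.RowPointers[i]; k < csr.RowPointers[i + 1]; k++)
        {
            Console.WriteLine("({0},{1}) = {2}", i, csr.ColumnIndices[k], csr.Values[k]);
        }
    }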
+ + + + + Array that contains the indices of the non-zero values. + + + + + Array that contains the non-zero elements of the vector. + + + + + Gets the number of non-zero elements in the vector. + + + + + True if the vector storage format is dense. + + + + + Retrieves the requested element without range checking. + + + + + Sets the element without range checking. + + + + + Calculates the amount with which to grow the storage array's if they need to be + increased in size. + + The amount grown. + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + True if the vector storage format is dense. + + + + + Gets or sets the value at the given index, with range checking. + + + The index of the element. + + The value to get or set. + This method is ranged checked. and + to get and set values without range checking. + + + + Retrieves the requested element without range checking. + + The index of the element. + The requested element. + Not range-checked. + + + + Sets the element without range checking. + + The index of the element. + The value to set the element to. + WARNING: This method is not thread safe. Use "lock" with it and be sure to avoid deadlocks. + + + + Indicates whether the current object is equal to another object of the same type. + + + An object to compare with this object. + + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to the current . + + + true if the specified is equal to the current ; otherwise, false. + + The to compare with the current . + + + + Serves as a hash function for a particular type. + + + A hash code for the current . + + + + + Defines the generic class for Vector classes. + + Supported data types are double, single, , and . + + + + The zero value for type T. + + + + + The value of 1.0 for type T. + + + + + Negates vector and save result to + + Target vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. 
+ + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar denominator to use. + The vector to store the result of the division. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar numerator to use. + The vector to store the result of the division. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the division. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise raise this vector to an exponent vector and store the result into the result vector. + + The exponent vector to raise this vector values to. + The vector to store the result of the pointwise power. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The result of the modulus. + + + + Pointwise applies the exponential function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Pointwise applies the natural logarithm function to each value and stores the result into the result vector. + + The vector to store the result. + + + + Adds a scalar to each element of the vector. + + The scalar to add. + A copy of the vector with the scalar added. + + + + Adds a scalar to each element of the vector and stores the result in the result vector. + + The scalar to add. + The vector to store the result of the addition. + If this vector and are not the same size. + + + + Adds another vector to this vector. + + The vector to add to this one. + A new vector containing the sum of both vectors. + If this vector and are not the same size. 
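A sketch of the vector products described above, under the same assumptions:

    var u = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
    var v = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });

    var sum   = u + v;                   // element-wise addition
    var dot   = u.DotProduct(v);         // 1*4 + 2*5 + 3*6 = 32
    var outer = u.OuterProduct(v);       // 3x3 matrix with [i,j] = u[i]*v[j]
    var prod  = u.PointwiseMultiply(v);  // { 4, 10, 18 }
    var ratio = u.PointwiseDivide(v);    // element-wise division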
+ + + + Adds another vector to this vector and stores the result into the result vector. + + The vector to add to this one. + The vector to store the result of the addition. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Subtracts a scalar from each element of the vector. + + The scalar to subtract. + A new vector containing the subtraction of this vector and the scalar. + + + + Subtracts a scalar from each element of the vector and stores the result in the result vector. + + The scalar to subtract. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Subtracts each element of the vector from a scalar. + + The scalar to subtract from. + A new vector containing the subtraction of the scalar and this vector. + + + + Subtracts each element of the vector from a scalar and stores the result in the result vector. + + The scalar to subtract from. + The vector to store the result of the subtraction. + If this vector and are not the same size. + + + + Returns a negated vector. + + The negated vector. + Added as an alternative to the unary negation operator. + + + + Negates vector and save result to + + Target vector + + + + Subtracts another vector from this vector. + + The vector to subtract from this one. + A new vector containing the subtraction of the the two vectors. + If this vector and are not the same size. + + + + Subtracts another vector to this vector and stores the result into the result vector. + + The vector to subtract from this one. + The vector to store the result of the subtraction. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Return vector with complex conjugate values of the source vector + + Conjugated vector + + + + Complex conjugates vector and save result to + + Target vector + + + + Multiplies a scalar to each element of the vector. + + The scalar to multiply. + A new vector that is the multiplication of the vector and the scalar. + + + + Multiplies a scalar to each element of the vector and stores the result in the result vector. + + The scalar to multiply. + The vector to store the result of the multiplication. + If this vector and are not the same size. + + + + Computes the dot product between this vector and another vector. + + The other vector. + The sum of a[i]*b[i] for all i. + If is not of the same size. + + + + + Computes the dot product between the conjugate of this vector and another vector. + + The other vector. + The sum of conj(a[i])*b[i] for all i. + If is not of the same size. + If is . + + + + + Divides each element of the vector by a scalar. + + The scalar to divide with. + A new vector that is the division of the vector and the scalar. + + + + Divides each element of the vector by a scalar and stores the result in the result vector. + + The scalar to divide with. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Divides a scalar by each element of the vector. + + The scalar to divide. + A new vector that is the division of the vector and the scalar. + + + + Divides a scalar by each element of the vector and stores the result in the result vector. + + The scalar to divide. + The vector to store the result of the division. + If this vector and are not the same size. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. 
+ A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the canonical modulus, where the result has the sign of the divisor, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector containing the result. + + + + Computes the remainder (vector % divisor), where the result has the sign of the dividend, + for each element of the vector for the given divisor. + + The scalar denominator to use. + A vector to store the results in. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector containing the result. + + + + Computes the remainder (dividend % vector), where the result has the sign of the dividend, + for the given dividend for each element of the vector. + + The scalar numerator to use. + A vector to store the results in. + + + + Pointwise multiplies this vector with another vector. + + The vector to pointwise multiply with this one. + A new vector which is the pointwise multiplication of the two vectors. + If this vector and are not the same size. + + + + Pointwise multiplies this vector with another vector and stores the result into the result vector. + + The vector to pointwise multiply with this one. + The vector to store the result of the pointwise multiplication. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector. + + The pointwise denominator vector to use. + A new vector which is the pointwise division of the two vectors. + If this vector and are not the same size. + + + + Pointwise divide this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise division. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + The matrix to store the result into. + If this vector and are not the same size. + + + + Pointwise raise this vector to an exponent and store the result into the result vector. + + The exponent to raise this vector values to. + + + + Pointwise raise this vector to an exponent. + + The exponent to raise this vector values to. + The vector to store the result into. + If this vector and are not the same size. + + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. 
+ + + + Pointwise canonical modulus, where the result has the sign of the divisor, + of this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise modulus. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + of this vector with another vector. + + The pointwise denominator vector to use. + If this vector and are not the same size. + + + + Pointwise remainder (% operator), where the result has the sign of the dividend, + this vector with another vector and stores the result into the result vector. + + The pointwise denominator vector to use. + The vector to store the result of the pointwise remainder. + If this vector and are not the same size. + If this vector and are not the same size. + + + + Helper function to apply a unary function to a vector. The function + f modifies the vector given to it in place. Before its + called, a copy of the 'this' vector with the same dimension is + first created, then passed to f. The copy is returned as the result + + Function which takes a vector, modifies it in place and returns void + New instance of vector which is the result + + + + Helper function to apply a unary function which modifies a vector + in place. + + Function which takes a vector, modifies it in place and returns void + The vector where the result is to be stored + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes a scalar and + a vector and modifies the latter in place. A copy of the "this" + vector is therefore first made and then passed to f together with + the scalar argument. The copy is then returned as the result + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + The resulting vector + + + + Helper function to apply a binary function which takes a scalar and + a vector, modifies the latter in place and returns void. + + Function which takes a scalar and a vector, modifies the vector in place and returns void + The scalar to be passed to the function + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the latter in place. A copy of the "this" vector is + first made and then passed to f together with the other vector. The + copy is then returned as the result + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Helper function to apply a binary function which takes two vectors + and modifies the second one in place + + Function which takes two vectors, modifies the second in place and returns void + The other vector to be passed to the function as argument. It is not modified + The resulting vector + If this vector and are not the same size. + + + + Pointwise applies the exponent function to each value. + + + + + Pointwise applies the exponent function to each value. + + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the natural logarithm function to each value. + + + + + Pointwise applies the natural logarithm function to each value. + + The vector to store the result. 
+ If this vector and are not the same size. + + + + Pointwise applies the abs function to each value + + + + + Pointwise applies the abs function to each value + + The vector to store the result + + + + Pointwise applies the acos function to each value + + + + + Pointwise applies the acos function to each value + + The vector to store the result + + + + Pointwise applies the asin function to each value + + + + + Pointwise applies the asin function to each value + + The vector to store the result + + + + Pointwise applies the atan function to each value + + + + + Pointwise applies the atan function to each value + + The vector to store the result + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the atan2 function to each value of the current + vector and a given other vector being the 'x' of atan2 and the + 'this' vector being the 'y' + + + + + + + Pointwise applies the ceiling function to each value + + + + + Pointwise applies the ceiling function to each value + + The vector to store the result + + + + Pointwise applies the cos function to each value + + + + + Pointwise applies the cos function to each value + + The vector to store the result + + + + Pointwise applies the cosh function to each value + + + + + Pointwise applies the cosh function to each value + + The vector to store the result + + + + Pointwise applies the floor function to each value + + + + + Pointwise applies the floor function to each value + + The vector to store the result + + + + Pointwise applies the log10 function to each value + + + + + Pointwise applies the log10 function to each value + + The vector to store the result + + + + Pointwise applies the round function to each value + + + + + Pointwise applies the round function to each value + + The vector to store the result + + + + Pointwise applies the sign function to each value + + + + + Pointwise applies the sign function to each value + + The vector to store the result + + + + Pointwise applies the sin function to each value + + + + + Pointwise applies the sin function to each value + + The vector to store the result + + + + Pointwise applies the sinh function to each value + + + + + Pointwise applies the sinh function to each value + + The vector to store the result + + + + Pointwise applies the sqrt function to each value + + + + + Pointwise applies the sqrt function to each value + + The vector to store the result + + + + Pointwise applies the tan function to each value + + + + + Pointwise applies the tan function to each value + + The vector to store the result + + + + Pointwise applies the tanh function to each value + + + + + Pointwise applies the tanh function to each value + + The vector to store the result + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector. + + The other vector + + + + Computes the outer product M[i,j] = u[i]*v[j] of this and another vector and stores the result in the result matrix. + + The other vector + The matrix to store the result of the product. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. 
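The block above lists the pointwise unary maps (exp, log, abs, trigonometric functions, sqrt, ...), the outer product, and the pointwise minimum/maximum members. A sketch of typical calls, assuming the usual Pointwise* method names of the library's Vector<T> API (the sample values are illustrative):

    using MathNet.Numerics.LinearAlgebra;

    static class PointwiseFunctionsDemo
    {
        static void Run()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 0.25, 1.0, 4.0 });

            Vector<double> roots = v.PointwiseSqrt();  // [ 0.5, 1, 2 ]
            Vector<double> logs  = v.PointwiseLog();   // natural log of each element
            Vector<double> absv  = v.PointwiseAbs();   // element-wise absolute value

            // Result-vector overloads write into existing storage:
            var buffer = Vector<double>.Build.Dense(v.Count);
            v.PointwiseExp(buffer);

            // Outer product M[i,j] = u[i] * v[j]:
            var u = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });
            Matrix<double> outer = u.OuterProduct(v);  // 2 x 3 matrix

            // Element-wise clamping against scalars:
            Vector<double> clamped = v.PointwiseMinimum(2.0).PointwiseMaximum(0.5);
        }
    }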
+ + + + Pointwise applies the maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute minimum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + + + + Pointwise applies the absolute maximum with a scalar to each value. + + The scalar value to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute minimum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + + + + Pointwise applies the absolute maximum with the values of another vector to each value. + + The vector with the values to compare to. + The vector to store the result. + If this vector and are not the same size. + + + + Calculates the L1 norm of the vector, also known as Manhattan norm. + + The sum of the absolute values. + + + + Calculates the L2 norm of the vector, also known as Euclidean norm. + + The square root of the sum of the squared values. + + + + Calculates the infinity norm of the vector. + + The maximum absolute value. + + + + Computes the p-Norm. + + The p value. + Scalar ret = (sum(abs(this[i])^p))^(1/p) + + + + Normalizes this vector to a unit vector with respect to the p-norm. + + The p value. + This vector normalized to a unit vector with respect to the p-norm. + + + + Returns the value of the absolute minimum element. + + The value of the absolute minimum element. + + + + Returns the index of the absolute minimum element. + + The index of absolute minimum element. + + + + Returns the value of the absolute maximum element. + + The value of the absolute maximum element. + + + + Returns the index of the absolute maximum element. + + The index of absolute maximum element. + + + + Returns the value of maximum element. + + The value of maximum element. + + + + Returns the index of the maximum element. + + The index of maximum element. + + + + Returns the value of the minimum element. + + The value of the minimum element. + + + + Returns the index of the minimum element. + + The index of minimum element. 
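The norm and extremum members documented above (L1, L2, infinity and general p-norms, normalization, and the minimum/maximum value and index queries) are typically used as below; the expected values in the comments follow from the sample vector:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class NormsDemo
    {
        static void Run()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 3.0, -4.0, 0.0 });

            double l1  = v.L1Norm();        // 7  (sum of absolute values, Manhattan norm)
            double l2  = v.L2Norm();        // 5  (Euclidean length)
            double inf = v.InfinityNorm();  // 4  (largest absolute value)
            double p3  = v.Norm(3.0);       // general p-norm

            Vector<double> unit = v.Normalize(2.0);   // scaled to unit Euclidean length

            double max    = v.Maximum();              // 3
            int    maxAt  = v.MaximumIndex();         // 0
            double absMax = v.AbsoluteMaximum();      // 4
            int    absAt  = v.AbsoluteMaximumIndex(); // 1

            Console.WriteLine(l1 + " " + l2 + " " + inf);
        }
    }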
+ + + + Computes the sum of the vector's elements. + + The sum of the vector's elements. + + + + Computes the sum of the absolute value of the vector's elements. + + The sum of the absolute value of the vector's elements. + + + + Indicates whether the current object is equal to another object of the same type. + + An object to compare with this object. + + true if the current object is equal to the parameter; otherwise, false. + + + + + Determines whether the specified is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Returns an enumerator that iterates through the collection. + + + A that can be used to iterate through the collection. + + + + + Returns an enumerator that iterates through a collection. + + + An object that can be used to iterate through the collection. + + + + + Returns a string that describes the type, dimensions and shape of this vector. + + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Character to use to print if there is not enough space to print all entries. Typical value: "..". + Character to use to separate two coluns on a line. Typical value: " " (2 spaces). + Character to use to separate two rows/lines. Typical value: Environment.NewLine. + Function to provide a string for any given entry value. + + + + Returns a string that represents the content of this vector, column by column. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that represents the content of this vector, column by column. + + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector, column by column and with a type header. + + Maximum number of entries and thus lines per column. Typical value: 12; Minimum: 3. + Maximum number of chatacters per line over all columns. Typical value: 80; Minimum: 16. + Floating point format string. Can be null. Default value: G6. + Format provider or culture. Can be null. + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + + + + + Returns a string that summarizes this vector. + The maximum number of cells can be configured in the class. + The format string is ignored. + + + + + Initializes a new instance of the Vector class. + + + + + Gets the raw vector data storage. + + + + + Gets the length or number of dimensions of this vector. + + + + Gets or sets the value at the given . + The index of the value to get or set. + The value of the vector at the given . + If is negative or + greater than the size of the vector. + + + Gets the value at the given without range checking.. + The index of the value to get or set. + The value of the vector at the given . + + + Sets the at the given without range checking.. + The index of the value to get or set. + The value to set. 
+ + + + Resets all values to zero. + + + + + Sets all values of a subvector to zero. + + + + + Set all values whose absolute value is smaller than the threshold to zero, in-place. + + + + + Set all values that meet the predicate to zero, in-place. + + + + + Returns a deep-copy clone of the vector. + + A deep-copy clone of the vector. + + + + Set the values of this vector to the given values. + + The array containing the values to use. + If is . + If is not the same size as this vector. + + + + Copies the values of this vector into the target vector. + + The vector to copy elements into. + If is . + If is not the same size as this vector. + + + + Creates a vector containing specified elements. + + The first element to begin copying from. + The number of elements to copy. + A vector containing a copy of the specified elements. + If is not positive or + greater than or equal to the size of the vector. + If + is greater than or equal to the size of the vector. + + If is not positive. + + + + Copies the values of a given vector into a region in this vector. + + The field to start copying to + The number of fields to cpy. Must be positive. + The sub-vector to copy from. + If is + + + + Copies the requested elements from this vector to another. + + The vector to copy the elements to. + The element to start copying from. + The element to start copying to. + The number of elements to copy. + + + + Returns the data contained in the vector as an array. + The returned array will be independent from this vector. + A new memory block will be allocated for the array. + + The vector's data as an array. + + + + Returns the internal array of this vector if, and only if, this vector is stored by such an array internally. + Otherwise returns null. Changes to the returned array and the vector will affect each other. + Use ToArray instead if you always need an independent array. + + + + + Create a matrix based on this vector in column form (one single column). + + + This vector as a column matrix. + + + + + Create a matrix based on this vector in row form (one single row). + + + This vector as a row matrix. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector. + + + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. + The enumerator will include all values, even if they are zero. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector. + + + The enumerator will skip all elements with a zero value. + + + + + Returns an IEnumerable that can be used to iterate through all non-zero values of the vector and their index. + + + The enumerator returns a Tuple with the first value being the element index + and the second value being the value of the element at that index. 
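The storage-oriented members documented above (clearing, coercing near-zero values, cloning, sub-vectors, copying, array and matrix conversion, and the enumeration helpers) are shown together in this sketch; the threshold and sample values are illustrative:

    using MathNet.Numerics.LinearAlgebra;

    static class StorageDemo
    {
        static void Run()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 1.0, 1e-18, 3.0, 4.0 });

            Vector<double> copy = v.Clone();       // deep copy with independent storage
            v.CoerceZero(1e-12);                   // flush tiny values to exactly zero, in place

            Vector<double> middle = v.SubVector(1, 2);  // elements 1 and 2 as a new vector

            var target = Vector<double>.Build.Dense(v.Count);
            v.CopyTo(target);                      // copy all values into an existing vector

            double[] data = v.ToArray();                 // independent array copy
            Matrix<double> column = v.ToColumnMatrix();  // n x 1 matrix
            Matrix<double> row    = v.ToRowMatrix();     // 1 x n matrix

            foreach (var pair in v.EnumerateIndexed())   // (index, value) tuples
            {
                // pair.Item1 is the element index, pair.Item2 its value.
            }
        }
    }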
+ The enumerator will skip all elements with a zero value. + + + + + Applies a function to each value of this vector and replaces the value with its result. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value with its result. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and replaces the value in the result vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and returns the results as a new vector. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value of this vector and returns the results as a new vector. + The index of each value (zero-based) is passed as first argument to the function. + If forceMapZero is not set to true, zero values may or may not be skipped depending + on the actual data storage implementation (relevant mostly for sparse vectors). + + + + + Applies a function to each value pair of two vectors and replaces the value in the result vector. + + + + + Applies a function to each value pair of two vectors and returns the results as a new vector. + + + + + Applies a function to update the status with each value pair of two vectors and returns the resulting status. + + + + + Returns a tuple with the index and value of the first element satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a tuple with the index and values of the first element pair of two vectors of the same size satisfying a predicate, or null if none is found. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if at least one element satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). 
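The functional members documented above (map a function over the values, in place or into a new vector, find the first element matching a predicate, and the Exists/ForAll style queries) look like this in use; method names follow the library's Vector<T> API and the predicates are illustrative. Note that, as the entries state, zero elements may be skipped on sparse storage unless mapping of zeros is forced:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class MapDemo
    {
        static void Run()
        {
            var v = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });

            // Map into a new vector...
            Vector<double> squared = v.Map(x => x * x);

            // ...optionally with the zero-based index of each element...
            Vector<double> weighted = v.MapIndexed((i, x) => (i + 1) * x);

            // ...or replace the values of the vector itself:
            v.MapInplace(x => 2.0 * x);            // v is now [2, 4, 6]

            // First (index, value) pair satisfying a predicate, or null if none:
            Tuple<int, double> firstBig = v.Find(x => x > 3.0);   // (1, 4)

            bool anyNegative = v.Exists(x => x < 0.0);            // false
            bool allFinite   = v.ForAll(x => !double.IsNaN(x) && !double.IsInfinity(x));

            Console.WriteLine(firstBig != null ? firstBig.Item2.ToString() : "none");
        }
    }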
+ + + + + Returns true if at least one element pairs of two vectors of the same size satisfies a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all elements satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns true if all element pairs of two vectors of the same size satisfy a predicate. + Zero elements may be skipped on sparse data structures if allowed (default). + + + + + Returns a Vector containing the same values of . + + This method is included for completeness. + The vector to get the values from. + A vector containing the same values as . + If is . + + + + Returns a Vector containing the negated values of . + + The vector to get the values from. + A vector containing the negated values as . + If is . + + + + Adds two Vectors together and returns the results. + + One of the vectors to add. + The other vector to add. + The result of the addition. + If and are not the same size. + If or is . + + + + Adds a scalar to each element of a vector. + + The vector to add to. + The scalar value to add. + The result of the addition. + If is . + + + + Adds a scalar to each element of a vector. + + The scalar value to add. + The vector to add to. + The result of the addition. + If is . + + + + Subtracts two Vectors and returns the results. + + The vector to subtract from. + The vector to subtract. + The result of the subtraction. + If and are not the same size. + If or is . + + + + Subtracts a scalar from each element of a vector. + + The vector to subtract from. + The scalar value to subtract. + The result of the subtraction. + If is . + + + + Substracts each element of a vector from a scalar. + + The scalar value to subtract from. + The vector to subtract. + The result of the subtraction. + If is . + + + + Multiplies a vector with a scalar. + + The vector to scale. + The scalar value. + The result of the multiplication. + If is . + + + + Multiplies a vector with a scalar. + + The scalar value. + The vector to scale. + The result of the multiplication. + If is . + + + + Computes the dot product between two Vectors. + + The left row vector. + The right column vector. + The dot product between the two vectors. + If and are not the same size. + If or is . + + + + Divides a scalar with a vector. + + The scalar to divide. + The vector. + The result of the division. + If is . + + + + Divides a vector with a scalar. + + The vector to divide. + The scalar value. + The result of the division. + If is . + + + + Pointwise divides two Vectors. + + The vector to divide. + The other vector. + The result of the division. + If and are not the same size. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of each element of the vector of the given divisor. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If is . + + + + Computes the remainder (% operator), where the result has the sign of the dividend, + of the given dividend of each element of the vector. + + The dividend we want to compute the remainder of. + The vector whose elements we want to use as divisor. + If is . + + + + Computes the pointwise remainder (% operator), where the result has the sign of the dividend, + of each element of two vectors. + + The vector whose elements we want to compute the remainder of. + The divisor to use. + If and are not the same size. + If is . 
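The operator overloads documented above mirror the named methods; in particular the * operator between two vectors is the dot product, and % applies the remainder (sign of the dividend) element-wise. A brief sketch with illustrative values:

    using System;
    using MathNet.Numerics.LinearAlgebra;

    static class OperatorDemo
    {
        static void Run()
        {
            var a = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0 });
            var b = Vector<double>.Build.DenseOfArray(new[] { 4.0, 5.0, 6.0 });

            Vector<double> sum   = a + b;     // element-wise addition
            Vector<double> diff  = a - b;     // element-wise subtraction
            Vector<double> neg   = -a;        // negation
            Vector<double> twice = 2.0 * a;   // scalar * vector (a * 2.0 also works)
            Vector<double> half  = a / 2.0;   // vector / scalar

            double dot = a * b;               // dot product of the two vectors

            Vector<double> rem = a % 2.0;     // remainder, sign of the dividend

            Console.WriteLine(dot);           // 32
        }
    }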
+ + + + Computes the sqrt of a vector pointwise + + The input vector + + + + + Computes the exponential of a vector pointwise + + The input vector + + + + + Computes the log of a vector pointwise + + The input vector + + + + + Computes the log10 of a vector pointwise + + The input vector + + + + + Computes the sin of a vector pointwise + + The input vector + + + + + Computes the cos of a vector pointwise + + The input vector + + + + + Computes the tan of a vector pointwise + + The input vector + + + + + Computes the asin of a vector pointwise + + The input vector + + + + + Computes the acos of a vector pointwise + + The input vector + + + + + Computes the atan of a vector pointwise + + The input vector + + + + + Computes the sinh of a vector pointwise + + The input vector + + + + + Computes the cosh of a vector pointwise + + The input vector + + + + + Computes the tanh of a vector pointwise + + The input vector + + + + + Computes the absolute value of a vector pointwise + + The input vector + + + + + Computes the floor of a vector pointwise + + The input vector + + + + + Computes the ceiling of a vector pointwise + + The input vector + + + + + Computes the rounded value of a vector pointwise + + The input vector + + + + + Converts a vector to single precision. + + + + + Converts a vector to double precision. + + + + + Converts a vector to single precision complex numbers. + + + + + Converts a vector to double precision complex numbers. + + + + + Gets a single precision complex vector with the real parts from the given vector. + + + + + Gets a double precision complex vector with the real parts from the given vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the real parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Gets a real vector representing the imaginary parts of a complex vector. + + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response vector Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + + Predictor matrix X + Response matrix Y + The direct method to be used to compute the regression. + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + The direct method to be used to compute the regression. + Best fitting list of model parameters β for each element in the predictor-arrays. 
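The regression entries above describe solving X*β ≈ y in the least-squares sense, selectable by decomposition (normal equations, QR, SVD). A sketch using the library's MultipleRegression helpers with a hand-built design matrix (the data and expected coefficients in the comments are illustrative):

    using System;
    using MathNet.Numerics.LinearAlgebra;
    using MathNet.Numerics.LinearRegression;

    static class RegressionDemo
    {
        static void Run()
        {
            // Design matrix X with an intercept column and one predictor column.
            Matrix<double> x = Matrix<double>.Build.DenseOfArray(new[,]
            {
                { 1.0, 1.0 },
                { 1.0, 2.0 },
                { 1.0, 3.0 },
                { 1.0, 4.0 }
            });
            Vector<double> y = Vector<double>.Build.DenseOfArray(new[] { 3.1, 5.0, 6.9, 9.1 });

            // Orthogonal (QR) decomposition: more stable than the normal equations, a bit slower.
            Vector<double> betaQr = MultipleRegression.QR(x, y);               // ~ (1.05, 1.99)

            // Normal equations via Cholesky: fastest, least robust for ill-conditioned X.
            Vector<double> betaNe = MultipleRegression.NormalEquations(x, y);

            Console.WriteLine(betaQr);
        }
    }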
+ + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses the cholesky decomposition of the normal equations. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses an orthogonal decomposition and is therefore more numerically stable than the normal equations but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. 
+ Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response vector Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that X*β with predictor X becomes as close to response Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Predictor matrix X + Response matrix Y + Best fitting vector for model parameters β + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + List of predictor-arrays. + List of responses + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Find the model parameters β such that their linear combination with all predictor-arrays in X become as close to their response in Y as possible, with least squares residuals. + Uses a singular value decomposition and is therefore more numerically stable (especially if ill-conditioned) than the normal equations or QR but also slower. + + Sequence of predictor-arrays and their response. + True if an intercept should be added as first artificial predictor value. Default = false. + Best fitting list of model parameters β for each element in the predictor-arrays. + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor (independent) + Response (dependent) + + + + Least-Squares fitting the points (x,y) to a line y : x -> a+b*x, + returning its best fitting parameters as (a, b) tuple, + where a is the intercept and b the slope. + + Predictor-Response samples as tuples + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response matrix Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + + + + Weighted Linear Regression using normal equations. + + Predictor matrix X + Response vector Y + Weight matrix W, usually diagonal with an entry for each predictor (row). + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Weighted Linear Regression using normal equations. + + List of sample vectors (predictor) together with their response. + List of weights, one for each sample. + True if an intercept should be added as first artificial predictor value. Default = false. + + + + Locally-Weighted Linear Regression using normal equations. + + + + + Locally-Weighted Linear Regression using normal equations. 
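For the simple straight-line case documented above, Fit.Line returns the intercept/slope pair directly. A minimal sketch with exactly linear sample data:

    using System;
    using MathNet.Numerics;

    static class LineFitDemo
    {
        static void Run()
        {
            double[] x = { 1.0, 2.0, 3.0 };
            double[] y = { 2.0, 4.0, 6.0 };

            // Least-squares line y = a + b*x; the tuple is (intercept a, slope b).
            Tuple<double, double> p = Fit.Line(x, y);   // (0, 2) for this data

            Console.WriteLine("y = " + p.Item1 + " + " + p.Item2 + " * x");
        }
    }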
+ + + + + First Order AB method(same as Forward Euler) + + Initial value + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Second Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Third Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + Fourth Order AB Method + + Initial value 1 + Start Time + End Time + Size of output array(the larger, the finer) + ode model + approximation with size N + + + + ODE Solver Algorithms + + + + + Second Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta method + + initial value + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Second Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Fourth Order Runge-Kutta to solve ODE SYSTEM + + initial vector + start time + end time + Size of output array(the larger, the finer) + ode function + approximations + + + + Class to represent a permutation for a subset of the natural numbers. + + + + + Entry _indices[i] represents the location to which i is permuted to. + + + + + Initializes a new instance of the Permutation class. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + + + + Gets the number of elements this permutation is over. + + + + + Computes where permutes too. + + The index to permute from. + The index which is permuted to. + + + + Computes the inverse of the permutation. + + The inverse of the permutation. + + + + Construct an array from a sequence of inversions. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + The set of inversions to construct the permutation from. + A permutation generated from a sequence of inversions. + + + + Construct a sequence of inversions from the permutation. + + + From wikipedia: the permutation 12043 has the inversions (0,2), (1,2) and (3,4). This would be + encoded using the array [22244]. + + A sequence of inversions. + + + + Checks whether the array represents a proper permutation. + + An array which represents where each integer is permuted too: indices[i] represents that integer i + is permuted to location indices[i]. + True if represents a proper permutation, false otherwise. + + + + Utilities for working with floating point numbers. + + + + Useful links: + + + http://docs.sun.com/source/806-3568/ncg_goldberg.html#689 - What every computer scientist should know about floating-point arithmetic + + + http://en.wikipedia.org/wiki/Machine_epsilon - Gives the definition of machine epsilon + + + + + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. 
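The ODE-solver and Permutation entries above can be exercised as follows. The parameter order matches the documented list (initial value, start time, end time, output size, ode model); the class and method names (RungeKutta.FourthOrder, the Permutation indexer) are the library's as I understand them and should be treated as assumptions:

    using System;
    using MathNet.Numerics;
    using MathNet.Numerics.OdeSolvers;

    static class OdeAndPermutationDemo
    {
        static void Run()
        {
            // Solve dy/dt = -y, y(0) = 1 on [0, 5] with 100 output points;
            // the exact solution is exp(-t).
            double[] y = RungeKutta.FourthOrder(1.0, 0.0, 5.0, 100, (t, yt) => -yt);
            Console.WriteLine(y[y.Length - 1]);        // close to exp(-5)

            // A permutation sending 0->1, 1->0, 2->2, and its inverse.
            var p = new Permutation(new[] { 1, 0, 2 });
            Permutation inv = p.Inverse();
            Console.WriteLine(p[0] + " " + inv[1]);    // "1 0"
        }
    }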
+ The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The relative accuracy required for being almost equal. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The number of decimal places on which the values must be compared. Must be 1 or larger. + + + + Compares two doubles and determines which double is bigger. + a < b -> -1; a ~= b (almost equal according to parameter) -> 0; a > b -> +1. + + The first value. + The second value. + The maximum error in terms of Units in Last Place (ulps), i.e. the maximum number of decimals that may be different. Must be 1 or larger. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. 
+ + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of the numbers, e.g. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is larger than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is larger than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + + + The values are equal if the difference between the two numbers is smaller than 10^(-numberOfDecimalPlaces). We divide by + two so that we have half the range on each side of thg. if == 2, then 0.01 will equal between + 0.005 and 0.015, but not 0.02 and not 0.00 + + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. 
+ + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The number of decimal places. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the specified number of decimal places or not. + + The first value. + The second value. + The relative accuracy required for being almost equal. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + Compares two doubles and determines if the first value is smaller than the second + value to within the tolerance or not. Equality comparison is based on the binary representation. + + The first value. + The second value. + The maximum number of floating point values for which the two values are considered equal. Must be 1 or larger. + true if the first value is smaller than the second value; otherwise false. + + + + The number of binary digits used to represent the binary number for a double precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + The number of binary digits used to represent the binary number for a single precision floating + point value. i.e. there are this many digits used to represent the + actual number, where in a number as: 0.134556 * 10^5 the digits are 0.134556 and the exponent is 5. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 double-precision floating numbers (64 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. 
Demmel and used in LAPACK and Scilab. + + + + + Standard epsilon, the maximum relative precision of IEEE 754 single-precision floating numbers (32 bit). + According to the definition of Prof. Higham and used in the ISO C standard and MATLAB. + + + + + Actual double precision machine epsilon, the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + On a standard machine this is equivalent to `DoublePrecision`. + + + + + Actual double precision machine epsilon, the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + On a standard machine this is equivalent to `PositiveDoublePrecision`. + + + + + The number of significant decimal places of double-precision floating numbers (64 bit). + + + + + The number of significant decimal places of single-precision floating numbers (32 bit). + + + + + Value representing 10 * 2^(-53) = 1.11022302462516E-15 + + + + + Value representing 10 * 2^(-24) = 5.96046447753906E-07 + + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the magnitude of the number. + + The value. + The magnitude of the number. + + + + Returns the number divided by it's magnitude, effectively returning a number between -10 and 10. + + The value. + The value of the number. + + + + Returns a 'directional' long value. This is a long value which acts the same as a double, + e.g. a negative double value will return a negative double value starting at 0 and going + more negative as the double value gets more negative. + + The input double value. + A long value which is roughly the equivalent of the double value. + + + + Returns a 'directional' int value. This is a int value which acts the same as a float, + e.g. a negative float value will return a negative int value starting at 0 and going + more negative as the float value gets more negative. + + The input float value. + An int value which is roughly the equivalent of the double value. + + + + Increments a floating point number to the next bigger number representable by the data type. + + The value which needs to be incremented. + How many times the number should be incremented. + + The incrementation step length depends on the provided value. + Increment(double.MaxValue) will return positive infinity. + + The next larger floating point value. + + + + Decrements a floating point number to the next smaller number representable by the data type. + + The value which should be decremented. + How many times the number should be decremented. + + The decrementation step length depends on the provided value. + Decrement(double.MinValue) will return negative infinity. + + The next smaller floating point value. + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The maximum count of numbers between the zero and the number . + + Zero if || is fewer than numbers from zero, otherwise. + + + Thrown if is smaller than zero. 
+ + + + + Forces small numbers near zero to zero, according to the specified absolute accuracy. + + The real number to coerce to zero, if it is almost zero. + The absolute threshold for to consider it as zero. + Zero if || is smaller than , otherwise. + + Thrown if is smaller than zero. + + + + + Forces small numbers near zero to zero. + + The real number to coerce to zero, if it is almost zero. + Zero if || is smaller than 2^(-53) = 1.11e-16, otherwise. + + + + Determines the range of floating point numbers that will match the specified value with the given tolerance. + + The value. + The ulps difference. + + Thrown if is smaller than zero. + + Tuple of the bottom and top range ends. + + + + Returns the floating point number that will match the value with the tolerance on the maximum size (i.e. the result is + always bigger than the value) + + The value. + The ulps difference. + The maximum floating point number which is larger than the given . + + + + Returns the floating point number that will match the value with the tolerance on the minimum size (i.e. the result is + always smaller than the value) + + The value. + The ulps difference. + The minimum floating point number which is smaller than the given . + + + + Determines the range of ulps that will match the specified value with the given tolerance. + + The value. + The relative difference. + + Thrown if is smaller than zero. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Tuple with the number of ULPS between the value and the value - relativeDifference as first, + and the number of ULPS between the value and the value + relativeDifference as second value. + + + + + Evaluates the count of numbers between two double numbers + + The first parameter. + The second parameter. + The second number is included in the number, thus two equal numbers evaluate to zero and two neighbor numbers evaluate to one. Therefore, what is returned is actually the count of numbers between plus 1. + The number of floating point values between and . + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + Thrown if is double.PositiveInfinity or double.NegativeInfinity. + + + Thrown if is double.NaN. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive double or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + + Relative Epsilon (positive float or NaN). + + Evaluates the negative epsilon. The more common positive epsilon is equal to two times this negative epsilon. + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive double or NaN) + Evaluates the positive epsilon. See also + + + + + Evaluates the minimum distance to the next distinguishable number near the argument value. + + The value used to determine the minimum distance. + Relative Epsilon (positive float or NaN) + Evaluates the positive epsilon. 
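The Precision helpers documented in this region step between adjacent representable doubles, measure the local floating-point spacing, and coerce almost-zero values to zero. A sketch, assuming the Increment/Decrement/EpsilonOf/CoerceZero names of the library's Precision class:

    using System;
    using MathNet.Numerics;

    static class PrecisionStepDemo
    {
        static void Run()
        {
            // Step to the neighbouring representable doubles.
            double next = Precision.Increment(1.0);   // smallest double greater than 1.0
            double prev = Precision.Decrement(1.0);   // largest double smaller than 1.0

            // Spacing of doubles near a given value.
            double eps = Precision.EpsilonOf(1000.0);

            // Flush an almost-zero value to exactly zero.
            double zeroed = Precision.CoerceZero(1e-20, 1e-15);   // 0.0

            Console.WriteLine(next - 1.0);            // one unit in the last place of 1.0
            Console.WriteLine(eps + " " + zeroed);
        }
    }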
See also + + + + + Calculates the actual (negative) double precision machine epsilon - the smallest number that can be subtracted from 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Demmel. + + Positive Machine epsilon + + + + Calculates the actual positive double precision machine epsilon - the smallest number that can be added to 1, yielding a results different than 1. + This is also known as unit roundoff error. According to the definition of Prof. Higham. + + Machine epsilon + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum absolute error. + + The first value. + The second value. + The absolute accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum absolute error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The norm of the first value (can be negative). + The norm of the second value (can be negative). + The norm of the difference of the two values (can be negative). + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal + within the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + True if both doubles are almost equal up to the specified maximum error, false otherwise. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two doubles and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. + + + + Compares two complex and determines if they are equal within + the specified maximum error. + + The first value. + The second value. + The accuracy required for being almost equal. 
Default and decimal-place comparisons:
- AlmostEqual(a, b) for real and complex values (double, float, Complex and Complex32 variants): true when the two values differ by no more than 10 * 2^-52.
- Comparison to within a number of decimal places used as an absolute measure: the values are equal when their difference is smaller than 0.5e-decimalPlaces, i.e. half the range on each side (with two decimal places, 0.01 matches anything between 0.005 and 0.015 but neither 0.00 nor 0.02); provided in a norm form and a two-value form.
- Comparison to within a number of decimal places where values very close to zero are compared by absolute difference and all others by relative difference, with the same halving convention; again in norm and two-value forms, with the relative form rejecting a negative decimal-place count.
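A short usage sketch of these comparisons, assuming the AlmostEqual extension-method overloads on double described above (parameterless tolerance, explicit absolute error, and decimal places); the exact overload set should be checked against the shipped assembly.

    using System;
    using MathNet.Numerics;

    class AlmostEqualSketch
    {
        static void Main()
        {
            double a = 0.1 + 0.2;   // 0.30000000000000004 in binary floating point
            double b = 0.3;

            Console.WriteLine(a == b);                   // False: exact comparison is too strict
            Console.WriteLine(a.AlmostEqual(b));         // True: within the default 10 * 2^-52 band
            Console.WriteLine(a.AlmostEqual(b, 1e-12));  // True: within an explicit absolute error
            Console.WriteLine(a.AlmostEqual(b, 10));     // True: equal to within 10 decimal places
        }
    }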
- Further decimal-place overloads (absolute and relative) for the remaining real and complex types.
- Comparison to within a tolerance based on the binary representation: the number of discrete floating-point steps between the two values is determined and checked against the tolerance, so a tolerance of 1 accepts only values with the same bit pattern or two directly adjacent numbers; the tolerance must be 1 or larger. The comparison method is the one described at http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm, and http://www.extremeoptimization.com/resources/Articles/FPDotNetConceptsAndFormats.aspx explains how to port that C code to .NET without pointers or unsafe code. A float counterpart is provided.
- List comparisons: overloads that compare two lists of doubles element by element within a specified maximum error, across the supported numeric types.
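To make the binary-representation comparison concrete, a minimal sketch that counts the representable doubles between two positive, finite values by reinterpreting their bit patterns, in the spirit of the article cited above; the library's routine additionally handles signs, infinities and NaN, which this sketch deliberately omits.

    using System;

    class UlpDistanceSketch
    {
        // For positive, finite doubles the IEEE 754 bit patterns are ordered,
        // so the difference of the raw 64-bit integers is the number of steps.
        static long NumbersBetween(double a, double b)
        {
            long ia = BitConverter.DoubleToInt64Bits(a);
            long ib = BitConverter.DoubleToInt64Bits(b);
            return Math.Abs(ia - ib);
        }

        static void Main()
        {
            double x = 1.0;
            double y = 1.0 + Math.Pow(2, -52);            // the next representable double after 1.0
            Console.WriteLine(NumbersBetween(x, y));      // 1: the values are adjacent
            Console.WriteLine(NumbersBetween(x, y) <= 1); // True: "equal to within 1 ulp"
        }
    }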
- The remaining list overloads, by maximum error and by number of decimal places.
- Vector comparisons: within a maximum error, to a number of decimal places as an absolute measure, and to a number of decimal places with the absolute-near-zero/relative-elsewhere rule; matrix comparisons with the same variants.
- A support interface for precision operations (such as AlmostEquals), parameterised on the implementing type, exposing a norm of a value (appropriate for measuring how close it is to zero) and a norm of the difference of two values (measuring how close together they are).
- A consistency-versus-performance trade-off setting for runs on different machines, whose levels follow.
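The list, vector and matrix overloads apply the scalar test element by element; as a rough illustration, a hypothetical helper (not the library's own API) built on the scalar AlmostEqual extension used earlier:

    using System;
    using System.Linq;
    using MathNet.Numerics;

    static class ListCompareSketch
    {
        // Hypothetical helper mirroring the documented semantics: every pair of
        // elements must be within the given absolute error, and lengths must match.
        public static bool AllAlmostEqual(double[] left, double[] right, double maxAbsoluteError)
        {
            return left.Length == right.Length
                && left.Zip(right, (x, y) => x.AlmostEqual(y, maxAbsoluteError)).All(ok => ok);
        }

        static void Main()
        {
            var a = new[] { 0.1 + 0.2, 1.0 / 3.0 };
            var b = new[] { 0.3, 0.3333333333333333 };
            Console.WriteLine(AllAlmostEqual(a, b, 1e-12));  // True
        }
    }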
- Consistency levels: consistent on the same CPU only (maximum performance); consistent on Intel and compatible CPUs with SSE2 support (maximum compatibility); and consistent on Intel CPUs supporting SSE2, SSE4.2, AVX or AVX2 or later.
- FFT provider selection: use the best provider available, or a specific provider if configured (for example through the "MathNetNumericsFFTProvider" environment variable), otherwise fall back to the best provider. Each provider exposes a probe that tries to find out whether it is available at least in principle (verification may still fail if it is available, but it will certainly fail if it is not) and an initialise-and-verify step that falls back to alternatives such as the managed provider.
- How to transpose a matrix: don't transpose, transpose, or conjugate transpose (for a real matrix the conjugate transpose is just a transpose).
- Matrix norm types: the 1-norm, the Frobenius norm, the infinity norm, and the largest-absolute-value norm.
- The interface to linear algebra algorithms that work off 1-D arrays (supported data types: Double, Single, Complex and Complex32), beginning with the vector kernels: adding a scaled vector to another, result = y + alpha*x (similar to the AXPY BLAS routine); scaling an array (similar to SCAL); conjugating an array; the dot product of two vectors (equivalent to DOT); and point-wise add, subtract and multiply of two arrays (no direct BLAS equivalent, but commonly available in optimised parallel or vectorised form).
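These kernels are normally reached through the high-level MathNet.Numerics.LinearAlgebra types rather than called directly; a brief sketch using the Vector<double> builder API, whose dense operations are typically backed by the active linear algebra provider.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class VectorKernelSketch
    {
        static void Main()
        {
            var x = Vector<double>.Build.Dense(new[] { 1.0, 2.0, 3.0 });
            var y = Vector<double>.Build.Dense(new[] { 4.0, 5.0, 6.0 });
            double alpha = 0.5;

            var axpy = y + alpha * x;               // y + alpha*x, cf. the AXPY-style kernel
            double dot = x.DotProduct(y);           // cf. the DOT kernel, prints 32
            var hadamard = x.PointwiseMultiply(y);  // cf. the point-wise multiply kernel

            Console.WriteLine(axpy);
            Console.WriteLine(dot);
            Console.WriteLine(hadamard);
        }
    }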
- Point-wise divide (z = x / y) and point-wise power (z = x ^ y) of two arrays, again without a direct BLAS equivalent.
- The requested norm of a matrix, given the norm type, the row and column counts and the matrix data.
- Matrix multiplication result = x * y (a simplified version of the BLAS GEMM routine with alpha set to 1.0, beta set to 0.0 and no transposition) and a multiply-with-update c = alpha*op(a)*op(b) + beta*c with per-operand transpose options.
- LU routines: the LUP factorization P*A = L*U computed in place with pivot indices, the diagonal of L always being 1.0 (equivalent to the GETRF LAPACK routine); matrix inversion via LU (GETRF and GETRI); inversion of an already factored matrix (GETRI); solving A*X = B via LU (GETRF and GETRS); and solving with a previously factored A (GETRS).
- The Cholesky factorization of a square, positive definite matrix, computed in place (equivalent to the POTRF routine).
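The multiply-with-update form can be written with the high-level Matrix<double> type; a sketch assuming the standard builder, Transpose and operator API, with small illustrative matrices.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class GemmSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 1, 2 }, { 3, 4 } });
            var b = Matrix<double>.Build.DenseOfArray(new double[,] { { 5, 6 }, { 7, 8 } });
            var c = Matrix<double>.Build.DenseIdentity(2);
            double alpha = 2.0, beta = 0.5;

            // c = alpha * op(a) * op(b) + beta * c, here with op(a) = a^T and op(b) = b.
            var updated = alpha * (a.Transpose() * b) + beta * c;
            Console.WriteLine(updated);
        }
    }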
- Cholesky solves: A*X = B via the factorization (equivalent to POTRF and POTRS) and with a previously factored A (POTRS).
- QR factorizations: a full QR in which A is overwritten with R and an M-by-M Q is returned separately, and a thin QR for M > N in which A is overwritten with Q and an N-by-N R is returned; both also produce a min(m, n) vector of additional information used by the solve routines (similar to the GEQRF and ORGQR routines).
- QR solves: A*X = B from A directly, or from a previously factored Q and R (the Q matrix is only needed by the managed provider; the native provider uses the Q portion stored in R together with that extra vector); rows must be greater than or equal to columns.
- The singular value decomposition (equivalent to the GESVD routine), optionally computing U and the transposed right singular vectors VT along with the singular values, and a solver for A*X = B based on it.
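These factor-and-solve routines are what the Solve methods of the high-level factorization objects typically call into; a sketch of solving A*x = b through the documented LU, QR and Cholesky factorizations of Matrix<double>, with an illustrative symmetric positive definite 2-by-2 system.

    using System;
    using MathNet.Numerics.LinearAlgebra;

    class SolveSketch
    {
        static void Main()
        {
            var a = Matrix<double>.Build.DenseOfArray(new double[,] { { 4, 1 }, { 1, 3 } });
            var b = Vector<double>.Build.Dense(new[] { 1.0, 2.0 });

            var xLu = a.LU().Solve(b);          // LU-based solve (GETRF/GETRS analogue)
            var xQr = a.QR().Solve(b);          // QR-based solve (GEQRF/ORGQR analogue)
            var xChol = a.Cholesky().Solve(b);  // valid here because A is symmetric positive definite

            Console.WriteLine(xLu);
            Console.WriteLine(xQr);
            Console.WriteLine(xChol);
        }
    }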
- Solving A*X = B from a previously computed SVD (the s, u and vt arrays), and an eigenvalue decomposition that takes a symmetry flag and the matrix order and fills order-by-order arrays with the eigenvectors, the eigenvalues (λ) in ascending value, and the block-diagonal eigenvalue matrix.
- Linear algebra provider selection: use the best provider available, or a specific provider if configured (for example through the "MathNetNumericsLAProvider" environment variable), otherwise fall back to the best provider.
- The managed linear algebra provider, one per supported numeric type, whose members carry the same documentation as the interface above (the AXPY-style update, scaling, conjugation, dot product and point-wise kernels).
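Provider selection is exposed through MathNet.Numerics.Control; a sketch assuming the Control.UseManaged() method and the Control.LinearAlgebraProvider property (switching to a native provider additionally requires the corresponding provider package).

    using System;
    using MathNet.Numerics;

    class ProviderSelectionSketch
    {
        static void Main()
        {
            // Force the portable managed provider, which is always available.
            Control.UseManaged();
            Console.WriteLine(Control.LinearAlgebraProvider);

            // Alternatively, the provider can be picked at run time through the
            // "MathNetNumericsLAProvider" environment variable, as documented above.
        }
    }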
The rest of this hunk repeats the provider member documentation for the managed and native providers of each supported numeric type (Double, Single, Complex, Complex32): the point-wise kernels, matrix norms, the GEMM-style multiply and multiply-with-update, and the LU, Cholesky, QR, SVD and eigenvalue routines described above. It also documents internal helpers: a cache-oblivious matrix multiplication (with transpose flags, a scaling factor, row and column shifts, block sizes and a first-recursion flag), a Cholesky step routine, work-array helpers used by the QR factorization, and a Givens rotation (a DROTG equivalent) that, given the Cartesian coordinates (da, db) of a point, returns the parameters r, z, c and s of the rotation that zeros the point's y-coordinate.
+ The number of columns in the A matrix. + On exit, A N by N matrix that holds the R matrix of the + QR factorization. + A min(m,n) vector. On exit, contains additional information + to be used by the QR solve routine. + This is similar to the GEQRF and ORGQR LAPACK routines. + + + + Perform calculation of Q or R + + Work array + Index of column in work array + Q or R matrices + The first row in + The last row + The first column + The last column + Number of available CPUs + + + + Generate column from initial matrix to work array + + Work array + Initial matrix + The number of rows in matrix + The first row + Column index + + + + Solves A*X=B for X using QR factorization of A. + + The A matrix. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Solves A*X=B for X using a previously QR factored matrix. + + The Q matrix obtained by calling . + The R matrix obtained by calling . + The number of rows in the A matrix. + The number of columns in the A matrix. + Contains additional information on Q. Only used for the native solver + and can be null for the managed provider. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + The type of QR factorization to perform. + Rows must be greater or equal to columns. + + + + Computes the singular value decomposition of A. + + Compute the singular U and VT vectors or not. + On entry, the M by N matrix to decompose. On exit, A may be overwritten. + The number of rows in the A matrix. + The number of columns in the A matrix. + The singular values of A in ascending value. + If is true, on exit U contains the left + singular vectors. + If is true, on exit VT contains the transposed + right singular vectors. + This is equivalent to the GESVD LAPACK routine. + + + + Given the Cartesian coordinates (da, db) of a point p, these function return the parameters da, db, c, and s + associated with the Givens rotation that zeros the y-coordinate of the point. + + Provides the x-coordinate of the point p. On exit contains the parameter r associated with the Givens rotation + Provides the y-coordinate of the point p. On exit contains the parameter z associated with the Givens rotation + Contains the parameter c associated with the Givens rotation + Contains the parameter s associated with the Givens rotation + This is equivalent to the DROTG LAPACK routine. + + + + Solves A*X=B for X using the singular value decomposition of A. + + On entry, the M by N matrix to decompose. + The number of rows in the A matrix. + The number of columns in the A matrix. + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Solves A*X=B for X using a previously SVD decomposed matrix. + + The number of rows in the A matrix. + The number of columns in the A matrix. + The s values returned by . + The left singular vectors returned by . + The right singular vectors returned by . + The B matrix. + The number of columns of B. + On exit, the solution matrix. + + + + Computes the eigenvalues and eigenvectors of a matrix. + + Whether the matrix is symmetric or not. + The order of the matrix. + The matrix to decompose. The lenth of the array must be order * order. + On output, the matrix contains the eigen vectors. The lenth of the array must be order * order. + On output, the eigen values (λ) of matrix in ascending value. 
The length of the arry must . + On output, the block diagonal eigenvalue matrix. The lenth of the array must be order * order. + + + + Multiplicative congruential generator using a modulus of 2^31-1 and a multiplier of 1132489760. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Multiplicative congruential generator using a modulus of 2^59 and a multiplier of 13^13. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Random number generator using Mersenne Twister 19937 algorithm. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Mersenne twister constant. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. 
+ + The seed value. + Uses the value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + A 32-bit combined multiple recursive generator with 2 components of order 3. + + Based off of P. L'Ecuyer, "Combined Multiple Recursive Random Number Generators," Operations Research, 44, 5 (1996), 816--822. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Represents a Parallel Additive Lagged Fibonacci pseudo-random number generator. + + + The type bases upon the implementation in the + Boost Random Number Library. + It uses the modulus 232 and by default the "lags" 418 and 1279. Some popular pairs are presented on + Wikipedia - Lagged Fibonacci generator. + + + + + Default value for the ShortLag + + + + + Default value for the LongLag + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. 
Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The ShortLag value + TheLongLag value + + + + Gets the short lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Gets the long lag of the Lagged Fibonacci pseudo-random number generator. + + + + + Stores an array of random numbers + + + + + Stores an index for the random number array element that will be accessed next. + + + + + Fills the array with new unsigned random numbers. + + + Generated random numbers are 32-bit unsigned integers greater than or equal to 0 + and less than or equal to . + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + This class implements extension methods for the System.Random class. The extension methods generate + pseudo-random distributed numbers for types other than double and int32. + + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an array of uniform random bytes. + + The random number generator. + The size of the array to fill. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers greater than or equal to zero and less than . + + The random number generator. + The array to fill with random values. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Fills an array with uniform random 32-bit signed integers within the specified range. + + The random number generator. + The array to fill with random values. + Lower bound, inclusive. + Upper bound, exclusive. 
+ + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative random number less than . + + The random number generator. + + A 64-bit signed integer greater than or equal to 0, and less than ; that is, + the range of return values includes 0 but not . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int32 range. + + The random number generator. + + A 32-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random number of the full Int64 range. + + The random number generator. + + A 64-bit signed integer of the full range, including 0, negative numbers, + and . + + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a nonnegative decimal floating point random number less than 1.0. + + The random number generator. + + A decimal floating point number greater than or equal to 0.0, and less than 1.0; that is, + the range of return values includes 0.0 but not 1.0. + + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Returns a random boolean. + + The random number generator. + + This extension is thread-safe if and only if called on an random number + generator provided by Math.NET Numerics or derived from the RandomSource class. + + + + + Provides a time-dependent seed value, matching the default behavior of System.Random. + WARNING: There is no randomness in this seed and quick repeated calls can cause + the same seed value. Do not use for cryptography! + + + + + Provides a seed based on time and unique GUIDs. + WARNING: There is only low randomness in this seed, but at least quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Provides a seed based on an internal random number generator (crypto if available), time and unique GUIDs. + WARNING: There is only medium randomness in this seed, but quick repeated + calls will result in different seed values. Do not use for cryptography! + + + + + Base class for random number generators. This class introduces a layer between + and the Math.Net Numerics random number generators to provide thread safety. + When used directly it use the System.Random as random number source. + + + + + Initializes a new instance of the class using + the value of to set whether + the instance is thread safe or not. + + + + + Initializes a new instance of the class. + + if set to true , the class is thread safe. + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Fills an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The array to fill with random values. 
+ + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + The size of the array to fill. + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than . + + + + + Returns a random number less then a specified maximum. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + A 32-bit signed integer less than . + is zero or negative. + + + + Returns a random number within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + A 32-bit signed integer greater than or equal to and less than ; that is, the range of return values includes but not . If equals , is returned. + + is greater than . + + + + Fills an array with random 32-bit signed integers greater than or equal to zero and less than . + + The array to fill with random values. + + + + Returns an array with random 32-bit signed integers greater than or equal to zero and less than . + + The size of the array to fill. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 1. + + + + Fills an array with random numbers within a specified range. + + The array to fill with random values. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an array with random 32-bit signed integers within the specified range. + + The size of the array to fill. + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Returns an infinite sequence of random 32-bit signed integers greater than or equal to zero and less than . + + + + + Returns an infinite sequence of random numbers within a specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive > minExclusive. + + + + Fills the elements of a specified array of bytes with random numbers. + + An array of bytes to contain random numbers. + is null. + + + + Returns a random number between 0.0 and 1.0. + + A double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than 2147483647 (). + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Returns a random N-bit signed integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 32 (not verified). + + + + + Returns a random N-bit signed long integer greater than or equal to zero and less than 2^N. + N (bit count) is expected to be greater than zero and less than 64 (not verified). 
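The generator classes described here all derive from the RandomSource base, so they share the same NextDouble/NextDoubles/Next surface. A short usage sketch, assuming the MathNet.Numerics.Random namespace from the referenced package (the seed values and array sizes are arbitrary):

    using MathNet.Numerics.Random;

    class RandomSketch
    {
        static void Main()
        {
            // Thread-safe shared Mersenne Twister instance.
            double u = MersenneTwister.Default.NextDouble();   // in [0.0, 1.0)

            // Explicitly seeded, thread-safe multiplicative congruential generator.
            var mcg = new Mcg31m1(42, true);
            double[] uniforms = mcg.NextDoubles(1000);          // bulk fill of uniform doubles

            // Integer and byte helpers inherited from the RandomSource base.
            var rng = new Xorshift(RandomSeed.Robust());
            int die = rng.Next(1, 7);                           // inclusive lower, exclusive upper bound
            var bytes = new byte[16];
            rng.NextBytes(bytes);

            System.Console.WriteLine($"{u} {uniforms[0]} {die}");
        }
    }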
+ + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + A random number generator based on the class in the .NET library. + + + + + Construct a new random number generator with a random seed. + + + + + Construct a new random number generator with random seed. + + if set to true , the class is thread safe. + + + + Construct a new random number generator with random seed. + + The seed value. + + + + Construct a new random number generator with random seed. + + The seed value. + if set to true , the class is thread safe. + + + + Default instance, thread-safe. + + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Returns a random 32-bit signed integer within the specified range. + + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ 2 (not verified, must be ensured by caller). + + + + Returns a random 32-bit signed integer within the specified range. + + The inclusive lower bound of the random number returned. + The exclusive upper bound of the random number returned. Range: maxExclusive ≥ minExclusive + 2 (not verified, must be ensured by caller). + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fill an array with uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an array of uniform random numbers greater than or equal to 0.0 and less than 1.0. + WARNING: potentially very short random sequence length, can generate repeated partial sequences. + + Parallelized on large length, but also supports being called in parallel from multiple threads + + + + Returns an infinite sequence of uniform random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 1982 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (1982), "Algorithm AS 183: + An efficient and portable pseudo-random number generator". Applied Statistics 31 (1982) 188-190 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. 
+ + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Wichmann-Hill’s 2006 combined multiplicative congruential generator. + + See: Wichmann, B. A. & Hill, I. D. (2006), "Generating good pseudo-random numbers". + Computational Statistics & Data Analysis 51:3 (2006) 1614-1622 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + + + + Initializes a new instance of the class. + + The seed value. + The seed is set to 1, if the zero is used as the seed. + if set to true , the class is thread safe. + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Implements a multiply-with-carry Xorshift pseudo random number generator (RNG) specified in Marsaglia, George. (2003). Xorshift RNGs. + Xn = a * Xn−3 + c mod 2^32 + http://www.jstatsoft.org/v08/i14/paper + + + + + The default value for X1. + + + + + The default value for X2. + + + + + The default value for the multiplier. + + + + + The default value for the carry over. + + + + + The multiplier to compute a double-precision floating point number [0, 1) + + + + + Seed or last but three unsigned random number. + + + + + Last but two unsigned random number. + + + + + Last but one unsigned random number. + + + + + The value of the carry over. + + + + + The multiplier. + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. 
+ Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Note: must be less than . + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class using + a seed based on time and unique GUIDs. + + if set to true , the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + If the seed value is zero, it is set to one. Uses the + value of to + set whether the instance is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + + Uses the default values of: + + a = 916905990 + c = 13579 + X1 = 77465321 + X2 = 362436069 + + + + + Initializes a new instance of the class. + + The seed value. + if set to true, the class is thread safe. + The multiply value + The initial carry value. + The initial value if X1. + The initial value if X2. + must be less than . + + + + Returns a random double-precision floating point number greater than or equal to 0.0, and less than 1.0. + + + + + Returns a random 32-bit signed integer greater than or equal to zero and less than + + + + + Fills the elements of a specified array of bytes with random numbers in full range, including zero and 255 (). + + + + + Fills an array with random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an array of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads. + + + + Returns an infinite sequence of random numbers greater than or equal to 0.0 and less than 1.0. + + Supports being called in parallel from multiple threads, but the result must be enumerated from a single thread each. + + + + Bisection root-finding algorithm. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. 
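A brief sketch of how these root finders are typically invoked, assuming the MathNet.Numerics.RootFinding namespace of the referenced package; the test function, bracket and tolerances below are purely illustrative:

    using System;
    using MathNet.Numerics.RootFinding;

    class RootFindingSketch
    {
        static void Main()
        {
            // f(x) = x^3 - 2x - 5 has a single real root near x = 2.0946; f(1) < 0 < f(3).
            Func<double, double> f  = x => x * x * x - 2.0 * x - 5.0;
            Func<double, double> df = x => 3.0 * x * x - 2.0;

            // Bracketed methods: bisection and Brent's method on [1, 3].
            double r1 = Bisection.FindRoot(f, 1.0, 3.0, 1e-10, 100);
            double r2 = Brent.FindRoot(f, 1.0, 3.0, 1e-12, 100);

            // Derivative-based robust Newton-Raphson, bounded to the same interval,
            // with 20 subdivisions used when bracketing is lost.
            double r3 = RobustNewtonRaphson.FindRoot(f, df, 1.0, 3.0, 1e-12, 100, 20);

            Console.WriteLine($"{r1} {r2} {r3}");
        }
    }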
+ The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy for both the root and the function value at the root. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Algorithm by by Brent, Van Wijngaarden, Dekker et al. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Guess for the low value of the range where the root is supposed to be. Will be expanded if needed. + Guess for the high value of the range where the root is supposed to be. Will be expanded if needed. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Factor at which to expand the bounds, if needed. Default 1.6. + Maximum number of expand iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + Helper method useful for preventing rounding errors. + a*sign(b) + + + + Algorithm by Broyden. + Implementation inspired by Press, Teukolsky, Vetterling, and Flannery, "Numerical Recipes in C", 2nd edition, Cambridge University Press + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + Initial guess of the root. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. + Maximum number of iterations. Usually 100. 
+ The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Helper method to calculate an approximation of the Jacobian. + + The function. + The argument (initial guess). + The result (of initial guess). + + + + Finds roots to the cubic equation x^3 + a2*x^2 + a1*x + a0 = 0 + Implements the cubic formula in http://mathworld.wolfram.com/CubicFormula.html + + + + + Q and R are transformed variables. + + + + + n^(1/3) - work around a negative double raised to (1/3) + + + + + Find all real-valued roots of the cubic equation a0 + a1*x + a2*x^2 + x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Find all three complex roots of the cubic equation d + c*x + b*x^2 + a*x^3 = 0. + Note the special coefficient order ascending by exponent (consistent with polynomials). + + + + + Pure Newton-Raphson root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + Initial guess of the root. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Robust Newton-Raphson root-finding algorithm that falls back to bisection when overshooting or converging too slow, or to subdivision on lacking bracketing. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. 
+ Maximum number of iterations. Default 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Default 20. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first derivative of the function to find roots from. + The low value of the range where the root is supposed to be. + The high value of the range where the root is supposed to be. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + How many parts an interval should be split into for zero crossing scanning in case of lacking bracketing. Example: 20. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false. + + + + Pure Secant root-finding algorithm without any recovery measures in cases it behaves badly. + The algorithm aborts immediately if the root leaves the bound interval. + + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MinValue. + The high value of the range where the root is supposed to be. Aborts if it leaves the interval. Default MaxValue. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Default 1e-8. + Maximum number of iterations. Default 100. + Returns the root with the specified accuracy. + + + + Find a solution of the equation f(x)=0. + The function to find roots from. + The first guess of the root within the bounds specified. + The second guess of the root within the bounds specified. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + The low value of the range where the root is supposed to be. Aborts if it leaves the interval. + Desired accuracy. The root will be refined until the accuracy or the maximum number of iterations is reached. Example: 1e-14. + Maximum number of iterations. Example: 100. + The root that was found, if any. Undefined if the function returns false. + True if a root with the specified accuracy was found, else false + + + Detect a range containing at least one root. + The function to detect roots from. + Lower value of the range. + Upper value of the range + The growing factor of research. Usually 1.6. + Maximum number of iterations. Usually 50. + True if the bracketing operation succeeded, false otherwise. + This iterative methods stops when two values with opposite signs are found. + + + + Sorting algorithms for single, tuple and triple lists. + + + + + Sort a list of keys, in place using the quick sort algorithm using the quick sort algorithm. + + The type of elements in the key list. + List to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. 
+ The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + Comparison, defining the sort order. + + + + Sort a range of a list of keys, in place using the quick sort algorithm. + + The type of element in the list. + List to sort. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the item list. + List to sort. + List to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys, items1 and items2 with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the key list. + The type of elements in the first item list. + The type of elements in the second item list. + List to sort. + First list to permute the same way as the key list. + Second list to permute the same way as the key list. + The zero-based starting index of the range to sort. + The length of the range to sort. + Comparison, defining the sort order. + + + + Sort a list of keys and items with respect to the keys, in place using the quick sort algorithm. + + The type of elements in the primary list. + The type of elements in the secondary list. + List to sort. + List to sort on duplicate primary items, and permute the same way as the key list. + Comparison, defining the primary sort order. + Comparison, defining the secondary sort order. + + + + Recursive implementation for an in place quick sort on a list. + + The type of the list on which the quick sort is performed. + The list which is sorted using quick sort. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on a list while reordering one other list accordingly. + + The type of the list on which the quick sort is performed. + The type of the list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on one list while reordering two other lists accordingly. + + The type of the list on which the quick sort is performed. + The type of the first list which is automatically reordered accordingly. + The type of the second list which is automatically reordered accordingly. + The list which is sorted using quick sort. + The first list which is automatically reordered accordingly. + The second list which is automatically reordered accordingly. + The method with which to compare two elements of the quick sort. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Recursive implementation for an in place quick sort on the primary and then by the secondary list while reordering one secondary list accordingly. + + The type of the primary list. + The type of the secondary list. + The list which is sorted using quick sort. 
+ The list which is sorted secondarily (on primary duplicates) and automatically reordered accordingly. + The method with which to compare two elements of the primary list. + The method with which to compare two elements of the secondary list. + The left boundary of the quick sort. + The right boundary of the quick sort. + + + + Performs an in place swap of two elements in a list. + + The type of elements stored in the list. + The list in which the elements are stored. + The index of the first element of the swap. + The index of the second element of the swap. + + + + This partial implementation of the SpecialFunctions class contains all methods related to the error function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the harmonic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the logistic function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + This partial implementation of the SpecialFunctions class contains all methods related to the modified bessel function. + + + + + Computes the logarithm of the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The logarithm of the Euler Beta function evaluated at z,w. + If or are not positive. + + + + Computes the Euler Beta function. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The Euler Beta function evaluated at z,w. + If or are not positive. + + + + Returns the lower incomplete (unregularized) beta function + B(a,b,x) = int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The lower incomplete (unregularized) beta function. + + + + Returns the regularized lower incomplete beta function + I_x(a,b) = 1/Beta(a,b) * int(t^(a-1)*(1-t)^(b-1),t=0..x) for real a > 0, b > 0, 1 >= x >= 0. + + The first Beta parameter, a positive real number. + The second Beta parameter, a positive real number. + The upper limit of the integral. + The regularized lower incomplete beta function. + + + + ************************************** + COEFFICIENTS FOR METHOD ErfImp * + ************************************** + + Polynomial coefficients for a numerator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for adenominator of ErfImp + calculation for Erf(x) in the interval [1e-10, 0.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [0.75, 1.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [1.25, 2.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. 
+ + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [2.25, 3.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [3.5, 5.25]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [5.25, 8]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [8, 11.5]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [11.5, 17]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [17, 24]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [24, 38]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [38, 60]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [60, 85]. + + + + Polynomial coefficients for a numerator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + Polynomial coefficients for a denominator in ErfImp + calculation for Erfc(x) in the interval [85, 110]. + + + + + ************************************** + COEFFICIENTS FOR METHOD ErfInvImp * + ************************************** + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0, 0.5]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.5, 0.75]. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x less than 3. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 3 and 6. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. 
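These coefficient tables back the public Erf/Erfc/ErfInv entry points; a small usage sketch, assuming the static SpecialFunctions class in the MathNet.Numerics namespace (numeric values in comments are approximate):

    using System;
    using MathNet.Numerics;

    class ErfSketch
    {
        static void Main()
        {
            double x  = 1.5;
            double e  = SpecialFunctions.Erf(x);      // ≈ 0.9661
            double ec = SpecialFunctions.Erfc(x);     // 1 - Erf(x)
            double xi = SpecialFunctions.ErfInv(e);   // recovers ≈ 1.5

            // Standard normal CDF expressed through Erf: Phi(z) = 0.5 * (1 + Erf(z / sqrt(2))).
            double z   = 1.96;
            double phi = 0.5 * (1.0 + SpecialFunctions.Erf(z / Math.Sqrt(2.0)));  // ≈ 0.975

            Console.WriteLine($"{e} {ec} {xi} {phi}");
        }
    }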
+ + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 6 and 18. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x between 18 and 44. + + + + Polynomial coefficients for a numerator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Polynomial coefficients for a denominator of ErfInvImp + calculation for Erf^-1(z) in the interval [0.75, 1] with x greater than 44. + + + + Calculates the error function. + The value to evaluate. + the error function evaluated at given value. + + + returns 1 if x == double.PositiveInfinity. + returns -1 if x == double.NegativeInfinity. + + + + + Calculates the complementary error function. + The value to evaluate. + the complementary error function evaluated at given value. + + + returns 0 if x == double.PositiveInfinity. + returns 2 if x == double.NegativeInfinity. + + + + + Calculates the inverse error function evaluated at z. + The inverse error function evaluated at given value. + + + returns double.PositiveInfinity if z >= 1.0. + returns double.NegativeInfinity if z <= -1.0. + + + Calculates the inverse error function evaluated at z. + value to evaluate. + the inverse error function evaluated at Z. + + + + Implementation of the error function. + + Where to evaluate the error function. + Whether to compute 1 - the error function. + the error function. + + + Calculates the complementary inverse error function evaluated at z. + The complementary inverse error function evaluated at given value. + We have tested this implementation against the arbitrary precision mpmath library + and found cases where we can only guarantee 9 significant figures correct. + + returns double.PositiveInfinity if z <= 0.0. + returns double.NegativeInfinity if z >= 2.0. + + + calculates the complementary inverse error function evaluated at z. + value to evaluate. + the complementary inverse error function evaluated at Z. + + + + The implementation of the inverse error function. + + First intermediate parameter. + Second intermediate parameter. + Third intermediate parameter. + the inverse error function. + + + + Computes the generalized Exponential Integral function (En). + + The argument of the Exponential Integral function. + Integer power of the denominator term. Generalization index. + The value of the Exponential Integral function. + + This implementation of the computation of the Exponential Integral function follows the derivation in + "Handbook of Mathematical Functions, Applied Mathematics Series, Volume 55", Abramowitz, M., and Stegun, I.A. 1964, reprinted 1968 by + Dover Publications, New York), Chapters 6, 7, and 26. + AND + "Advanced mathematical methods for scientists and engineers", Bender, Carl M.; Steven A. Orszag (1978). page 253 + + + for x > 1 uses continued fraction approach that is often used to compute incomplete gamma. + for 0 < x <= 1 uses Taylor series expansion + + Our unit tests suggest that the accuracy of the Exponential Integral function is correct up to 13 floating point digits. + + + + + Initializes static members of the SpecialFunctions class. + + + + + Computes the factorial function x -> x! of an integer number > 0. The function can represent all number up + to 22! exactly, all numbers up to 170! 
using a double representation. All larger values will overflow. + + A value value! for value > 0 + + If you need to multiply or divide various such factorials, consider using the logarithmic version + instead so you can add instead of multiply and subtract instead of divide, and + then exponentiate the result using . This will also circumvent the problem that + factorials become very large even for small parameters. + + + + + + Computes the factorial of an integer. + + + + + Computes the logarithmic factorial function x -> ln(x!) of an integer number > 0. + + A value value! for value > 0 + + + + Computes the binomial coefficient: n choose k. + + A nonnegative value n. + A nonnegative value h. + The binomial coefficient: n choose k. + + + + Computes the natural logarithm of the binomial coefficient: ln(n choose k). + + A nonnegative value n. + A nonnegative value h. + The logarithmic binomial coefficient: ln(n choose k). + + + + Computes the multinomial coefficient: n choose n1, n2, n3, ... + + A nonnegative value n. + An array of nonnegative values that sum to . + The multinomial coefficient. + if is . + If or any of the are negative. + If the sum of all is not equal to . + + + + The order of the approximation. + + + + + Auxiliary variable when evaluating the function. + + + + + Polynomial coefficients for the approximation. + + + + + Computes the logarithm of the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which achieves an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + Our unit tests suggest that the accuracy of the Gamma function is correct up to 14 floating point digits. + + + + + Computes the Gamma function. + + The argument of the gamma function. + The logarithm of the gamma function. + + + This implementation of the computation of the gamma and logarithm of the gamma function follows the derivation in + "An Analysis Of The Lanczos Gamma Approximation", Glendon Ralph Pugh, 2004. + We use the implementation listed on p. 116 which should achieve an accuracy of 16 floating point digits. Although 16 digit accuracy + should be sufficient for double values, improving accuracy is possible (see p. 126 in Pugh). + + Our unit tests suggest that the accuracy of the Gamma function is correct up to 13 floating point digits. + + + + + Returns the upper incomplete regularized gamma function + Q(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete regularized gamma function. + + + + Returns the upper incomplete gamma function + Gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The lower integral limit. + The upper incomplete gamma function. + + + + Returns the lower incomplete gamma function + gamma(a,x) = int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. + The upper integral limit. + The lower incomplete gamma function. + + + + Returns the lower incomplete regularized gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0. + + The argument for the gamma function. 
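The factorial, binomial and Gamma members described above tie together through Gamma(n + 1) = n!. A minimal sketch follows, again assuming the MathNet.Numerics SpecialFunctions API (Gamma, Factorial, FactorialLn, Binomial, BinomialLn); treat those names as an assumption of this note rather than something stated in this file.

    using System;
    using MathNet.Numerics;

    class GammaDemo
    {
        static void Main()
        {
            // Gamma(n + 1) = n!, exact for small integers.
            double g5 = SpecialFunctions.Gamma(5.0);        // 24 = 4!
            double f4 = SpecialFunctions.Factorial(4);      // 24

            // Binomial coefficient and its logarithm: C(5, 2) = 10.
            double c = SpecialFunctions.Binomial(5, 2);     // 10
            double lnC = SpecialFunctions.BinomialLn(5, 2); // ln(10)

            // For large n, work in log space to avoid overflow (170! is the double limit).
            double ln1000Fact = SpecialFunctions.FactorialLn(1000);

            Console.WriteLine($"{g5} {f4} {c} {Math.Exp(lnC)} {ln1000Fact}");
        }
    }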
+ The upper integral limit. + The lower incomplete gamma function. + + + + Returns the inverse P^(-1) of the regularized lower incomplete gamma function + P(a,x) = 1/Gamma(a) * int(exp(-t)t^(a-1),t=0..x) for real a > 0, x > 0, + such that P^(-1)(a,P(a,x)) == x. + + + + + Computes the Digamma function which is mathematically defined as the derivative of the logarithm of the gamma function. + This implementation is based on + Jose Bernardo + Algorithm AS 103: + Psi ( Digamma ) Function, + Applied Statistics, + Volume 25, Number 3, 1976, pages 315-317. + Using the modifications as in Tom Minka's lightspeed toolbox. + + The argument of the digamma function. + The value of the DiGamma function at . + + + + Computes the inverse Digamma function: this is the inverse of the logarithm of the gamma function. This function will + only return solutions that are positive. + This implementation is based on the bisection method. + + The argument of the inverse digamma function. + The positive solution to the inverse DiGamma function at . + + + + Computes the 'th Harmonic number. + + The Harmonic number which needs to be computed. + The t'th Harmonic number. + + + + Compute the generalized harmonic number of order n of m. (1 + 1/2^m + 1/3^m + ... + 1/n^m) + + The order parameter. + The power parameter. + General Harmonic number. + + + + Computes the logistic function. see: http://en.wikipedia.org/wiki/Logistic + + The parameter for which to compute the logistic function. + The logistic function of . + + + + Computes the logit function, the inverse of the sigmoid logistic function. see: http://en.wikipedia.org/wiki/Logit + + The parameter for which to compute the logit function. This number should be + between 0 and 1. + The logarithm of divided by 1.0 - . + + + + ************************************** + COEFFICIENTS FOR METHODS bessi0 * + ************************************** + + Chebyshev coefficients for exp(-x) I0(x) + in the interval [0, 8]. + + lim(x->0){ exp(-x) I0(x) } = 1. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessi1 * + ************************************** + + Chebyshev coefficients for exp(-x) I1(x) / x + in the interval [0, 8]. + + lim(x->0){ exp(-x) I1(x) / x } = 1/2. + + + + Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + in the inverted interval [8, infinity]. + + lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk0, bessk0e * + ************************************** + + Chebyshev coefficients for K0(x) + log(x/2) I0(x) + in the interval [0, 2]. The odd order coefficients are all + zero; only the even order coefficients are listed. + + lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL. + + + + Chebyshev coefficients for exp(x) sqrt(x) K0(x) + in the inverted interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2). + + + + + ************************************** + COEFFICIENTS FOR METHODS bessk1, bessk1e * + ************************************** + + Chebyshev coefficients for x(K1(x) - log(x/2) I1(x)) + in the interval [0, 2]. + + lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1. + + + + Chebyshev coefficients for exp(x) sqrt(x) K1(x) + in the interval [2, infinity]. + + lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2). + + + + Returns the modified Bessel function of first kind, order 0 of the argument. +

+ The function is defined as i0(x) = j0( ix ).
+ The range is partitioned into the two intervals [0, 8] and (8, infinity).
+ Chebyshev polynomial expansions are employed in each interval.
+ The value to compute the bessel function of.
+
+ Returns the modified Bessel function of first kind, order 1 of the argument.
+ The function is defined as i1(x) = -i j1( ix ).
+ The range is partitioned into the two intervals [0, 8] and (8, infinity).
+ Chebyshev polynomial expansions are employed in each interval.
+ The value to compute the bessel function of.
+
+ Returns the modified Bessel function of the second kind of order 0 of the argument.
+ The range is partitioned into the two intervals [0, 8] and (8, infinity).
+ Chebyshev polynomial expansions are employed in each interval.
+ The value to compute the bessel function of.
+
+ Returns the exponentially scaled modified Bessel function of the second kind of order 0 of the argument.
+ The value to compute the bessel function of.
+
+ Returns the modified Bessel function of the second kind of order 1 of the argument.
+ The range is partitioned into the two intervals [0, 2] and (2, infinity).
+ Chebyshev polynomial expansions are employed in each interval.
+ The value to compute the bessel function of.
+
+ Returns the exponentially scaled modified Bessel function of the second kind of order 1 of the argument.
+ k1e(x) = exp(x) * k1(x).
+ The value to compute the bessel function of.
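The implementation documented above evaluates these functions via Chebyshev expansions over two intervals. For orientation only, here is a self-contained, hypothetical power-series sketch of I0, using I0(x) = sum over k of (x^2/4)^k / (k!)^2; it is not the library's code and is only adequate for moderate arguments.

    using System;

    static class BesselSketch
    {
        // Naive power series for the modified Bessel function I0; illustrative only.
        // The documented implementation uses Chebyshev expansions on [0, 8] and (8, infinity).
        public static double I0(double x)
        {
            double term = 1.0, sum = 1.0, q = x * x / 4.0;
            for (int k = 1; k < 200 && term > 1e-17 * sum; k++)
            {
                term *= q / (k * (double)k);   // builds (x^2/4)^k / (k!)^2 incrementally
                sum += term;
            }
            return sum;
        }

        static void Main() => Console.WriteLine(I0(1.0)); // ~1.2660658
    }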
+ + + Returns the modified Struve function of order 0. + + The value to compute the function of. + + + + Returns the modified Struve function of order 1. + + The value to compute the function of. + + + + Returns the difference between the Bessel I0 and Struve L0 functions. + + The value to compute the function of. + + + + Returns the difference between the Bessel I1 and Struve L1 functions. + + The value to compute the function of. + + + + Numerically stable exponential minus one, i.e. x -> exp(x)-1 + + A number specifying a power. + Returns exp(power)-1. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Numerically stable hypotenuse of a right angle triangle, i.e. (a,b) -> sqrt(a^2 + b^2) + + The length of side a of the triangle. + The length of side b of the triangle. + Returns sqrt(a2 + b2) without underflow/overflow. + + + + Evaluation functions, useful for function approximation. + + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Evaluate a polynomial at point x. + Coefficients are ordered by power with power k at index k. + Example: coefficients [3,-1,2] represent y=2x^2-x+3. + + The location where to evaluate the polynomial at. + The coefficients of the polynomial, coefficient for power k at index k. + + + + Numerically stable series summation + + provides the summands sequentially + Sum + + + Evaluates the series of Chebyshev polynomials Ti at argument x/2. + The series is given by +
+            y = sum( coef[i] * T_i(x/2), i = 0 .. N-1 )
+ Coefficients are stored in reverse order, i.e. the zero + order term is last in the array. Note N is the number of + coefficients, not the order. +

+ If coefficients are for the interval a to b, x must have been transformed to
+ x -> 2(2x - b - a)/(b-a) before entering the routine. This maps x from (a, b)
+ to (-1, 1), over which the Chebyshev polynomials are defined.
+
+ If the coefficients are for the inverted interval, in which (a, b) is mapped to
+ (1/b, 1/a), the required transformation is x -> 2(2ab/x - b - a)/(b-a).
+ If b is infinity, this becomes x -> 4a/x - 1.
+
+ SPEED: Taking advantage of the recurrence properties of the Chebyshev polynomials,
+ the routine requires one more addition per loop than evaluating a nested
+ polynomial of the same degree.
+
+ The coefficients of the polynomial.
+ Argument to the polynomial.
+ Reference: https://bpm2.svn.codeplex.com/svn/Common.Numeric/Arithmetic.cs
+ Marked as Deprecated in
+ http://people.apache.org/~isabel/mahout_site/mahout-matrix/apidocs/org/apache/mahout/jet/math/Arithmetic.html
+
+ Summation of Chebyshev polynomials, using the Clenshaw method with Reinsch modification.
+ The no. of terms in the sequence.
+ The coefficients of the Chebyshev series, length n+1.
+ The value at which the series is to be evaluated.
+ ORIGINAL AUTHOR: Dr. Allan J. MacLeod; Dept. of Mathematics and Statistics,
+ University of Paisley; High St., PAISLEY, SCOTLAND
+ REFERENCES: "An error analysis of the modified Clenshaw method for evaluating
+ Chebyshev and Fourier series", J. Oliver, J.I.M.A., vol. 20, 1977, pp. 379-391.
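The polynomial-evaluation convention documented above (coefficient for power k at index k, e.g. [3, -1, 2] represents y = 2x^2 - x + 3) is plain Horner's rule. The following is a self-contained sketch of that convention, not the library's own code.

    using System;

    static class PolynomialSketch
    {
        // Horner's rule with coefficients ordered by power: c[k] multiplies x^k.
        public static double Evaluate(double x, params double[] c)
        {
            double y = 0.0;
            for (int i = c.Length - 1; i >= 0; i--)
                y = y * x + c[i];
            return y;
        }

        static void Main()
        {
            // [3, -1, 2] => 2x^2 - x + 3; at x = 2 this is 8 - 2 + 3 = 9.
            Console.WriteLine(Evaluate(2.0, 3.0, -1.0, 2.0)); // 9
        }
    }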
+ + + Valley-shaped Rosenbrock function for 2 dimensions: (x,y) -> (1-x)^2 + 100*(y-x^2)^2. + This function has a global minimum at (1,1) with f(1,1) = 0. + Common range: [-5,10] or [-2.048,2.048]. + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Valley-shaped Rosenbrock function for 2 or more dimensions. + This function have a global minimum of all ones and, for 8 > N > 3, a local minimum at (-1,1,...,1). + + + https://en.wikipedia.org/wiki/Rosenbrock_function + http://www.sfu.ca/~ssurjano/rosen.html + + + + + Himmelblau, a multi-modal function: (x,y) -> (x^2+y-11)^2 + (x+y^2-7)^2 + This function has 4 global minima with f(x,y) = 0. + Common range: [-6,6]. + Named after David Mautner Himmelblau + + + https://en.wikipedia.org/wiki/Himmelblau%27s_function + + + + + Rastrigin, a highly multi-modal function with many local minima. + Global minimum of all zeros with f(0) = 0. + Common range: [-5.12,5.12]. + + + https://en.wikipedia.org/wiki/Rastrigin_function + http://www.sfu.ca/~ssurjano/rastr.html + + + + + Drop-Wave, a multi-modal and highly complex function with many local minima. + Global minimum of all zeros with f(0) = -1. + Common range: [-5.12,5.12]. + + + http://www.sfu.ca/~ssurjano/drop.html + + + + + Ackley, a function with many local minima. It is nearly flat in outer regions but has a large hole at the center. + Global minimum of all zeros with f(0) = 0. + Common range: [-32.768, 32.768]. + + + http://www.sfu.ca/~ssurjano/ackley.html + + + + + Bowl-shaped first Bohachevsky function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-100, 100] + + + http://www.sfu.ca/~ssurjano/boha.html + + + + + Plate-shaped Matyas function. + Global minimum of all zeros with f(0,0) = 0. + Common range: [-10, 10]. + + + http://www.sfu.ca/~ssurjano/matya.html + + + + + Valley-shaped six-hump camel back function. + Two global minima and four local minima. Global minima with f(x) ) -1.0316 at (0.0898,-0.7126) and (-0.0898,0.7126). + Common range: x in [-3,3], y in [-2,2]. + + + http://www.sfu.ca/~ssurjano/camel6.html + + + + + Statistics operating on arrays assumed to be unsorted. + WARNING: Methods with the Inplace-suffix may modify the data array by reordering its entries. + + + + + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. 
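The optimization test functions listed above are simple closed forms. A hypothetical self-contained sketch of two of them follows, with their documented minima used as sanity checks; it does not reproduce the library's own signatures.

    using System;

    static class TestFunctionsSketch
    {
        // Rosenbrock valley: global minimum f(1, 1) = 0.
        public static double Rosenbrock(double x, double y)
            => (1 - x) * (1 - x) + 100 * (y - x * x) * (y - x * x);

        // Himmelblau: four global minima with f = 0, e.g. f(3, 2) = 0.
        public static double Himmelblau(double x, double y)
            => Math.Pow(x * x + y - 11, 2) + Math.Pow(x + y * y - 7, 2);

        static void Main()
        {
            Console.WriteLine(Rosenbrock(1, 1));   // 0
            Console.WriteLine(Himmelblau(3, 2));   // 0
        }
    }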
+ Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
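The distinction drawn above between the unbiased sample variance (N-1 normalizer, Bessel's correction) and the population variance (N normalizer) is easy to get wrong. The following is a minimal self-contained sketch of that distinction, not the library's implementation.

    using System;
    using System.Linq;

    static class VarianceSketch
    {
        // Unbiased sample variance: divide by N - 1 (Bessel's correction). NaN for N < 2.
        public static double SampleVariance(double[] data)
        {
            if (data.Length < 2) return double.NaN;
            double mean = data.Average();
            return data.Sum(x => (x - mean) * (x - mean)) / (data.Length - 1);
        }

        // Population variance: divide by N; biased if applied to a subset.
        public static double PopulationVariance(double[] data)
        {
            if (data.Length == 0) return double.NaN;
            double mean = data.Average();
            return data.Sum(x => (x - mean) * (x - mean)) / data.Length;
        }

        static void Main()
        {
            var d = new[] { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };
            Console.WriteLine(SampleVariance(d));      // ~4.571
            Console.WriteLine(PopulationVariance(d));  // 4
        }
    }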
+ One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
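The quantile documented above follows the R-8 definition (approximately median-unbiased). Under the stated boundary rules (use x1 when tau < (2/3)/(N+1/3), use xN when tau >= (N-1/3)/(N+1/3)) this corresponds to the plotting position h = (N + 1/3) * tau + 1/3 with linear interpolation between order statistics. A self-contained sketch follows; unlike the documented routines it works on a sorted copy rather than in place.

    using System;
    using System.Linq;

    static class QuantileSketch
    {
        // R-8 quantile on a copy of the data.
        public static double QuantileR8(double[] data, double tau)
        {
            var x = data.OrderBy(v => v).ToArray();        // sorted copy, 0-based
            int n = x.Length;
            double h = (n + 1.0 / 3.0) * tau + 1.0 / 3.0;  // 1-based plotting position
            if (h <= 1) return x[0];                       // tau below the lower cutoff -> x1
            if (h >= n) return x[n - 1];                   // tau at or above the upper cutoff -> xN
            int k = (int)Math.Floor(h);
            return x[k - 1] + (h - k) * (x[k] - x[k - 1]); // linear interpolation
        }

        static void Main()
        {
            var d = new[] { 1.0, 2.0, 3.0, 4.0 };
            Console.WriteLine(QuantileR8(d, 0.5));  // 2.5 (the median)
        }
    }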
+ Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the smallest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the largest absolute value from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the geometric mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the harmonic mean of the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as unsorted array. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as unsorted array. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample array, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample arrays. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample array. + Second sample array. + + + + Evaluates the population covariance from the full population provided as two arrays. 
+ On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population array. + Second population array. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the unsorted data array. + Returns NaN if data is empty or any entry is NaN. + + Sample array, no sorting is assumed. + + + + Returns the order statistic (order 1..N) from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the unsorted data array. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the p-Percentile value from the unsorted data array. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the third quartile value from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the inter-quartile range from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the unsorted data array. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. 
+ Quantile selector, between 0.0 and 1.0 (inclusive) + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the unsorted data array. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + Sample array, no sorting is assumed. Will be reordered. + Quantile selector, between 0.0 and 1.0 (inclusive) + Quantile definition, to choose what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the unsorted data array. + The rank definition can be specified to be compatible + with an existing system. + WARNING: Works inplace and can thus causes the data array to be reordered. + + + + + A class with correlation measures between two datasets. + + + + + Computes the Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + The Pearson product-moment correlation coefficient. + + + + Computes the Weighted Pearson Product-Moment Correlation coefficient. + + Sample data A. + Sample data B. + Corresponding weights of data. + The Weighted Pearson product-moment correlation coefficient. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Array of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Pearson Product-Moment Correlation matrix. + + Enumerable of sample data vectors. + The Pearson product-moment correlation matrix. + + + + Computes the Spearman Ranked Correlation coefficient. + + Sample data series A. + Sample data series B. + The Spearman ranked correlation coefficient. + + + + Computes the Spearman Ranked Correlation matrix. + + Array of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the Spearman Ranked Correlation matrix. + + Enumerable of sample data vectors. + The Spearman ranked correlation matrix. + + + + Computes the basic statistics of data set. The class meets the + NIST standard of accuracy for mean, variance, and standard deviation + (the only statistics they provide exact values for) and exceeds them + in increased accuracy mode. + Recommendation: consider to use RunningStatistics instead. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Initializes a new instance of the class. + + The sample data. + + If set to true, increased accuracy mode used. + Increased accuracy mode uses types for internal calculations. + + + Don't use increased accuracy for data sets containing large values (in absolute value). + This may cause the calculations to overflow. + + + + + Gets the size of the sample. + + The size of the sample. + + + + Gets the sample mean. + + The sample mean. + + + + Gets the unbiased population variance estimator (on a dataset of size N will use an N-1 normalizer). + + The sample variance. 
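For the Correlation class documented above, a minimal usage sketch, assuming the MathNet.Numerics.Statistics namespace from the package referenced in this diff exposes Correlation.Pearson and Correlation.Spearman with array arguments:

    using System;
    using MathNet.Numerics.Statistics;

    class CorrelationDemo
    {
        static void Main()
        {
            var a = new[] { 1.0, 2.0, 3.0, 4.0, 5.0 };
            var b = new[] { 2.0, 4.0, 6.0, 8.0, 10.0 };   // perfectly linear in a

            double pearson = Correlation.Pearson(a, b);   // 1.0
            double spearman = Correlation.Spearman(a, b); // 1.0 (rank-based)

            Console.WriteLine($"{pearson} {spearman}");
        }
    }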
+ + + + Gets the unbiased population standard deviation (on a dataset of size N will use an N-1 normalizer). + + The sample standard deviation. + + + + Gets the sample skewness. + + The sample skewness. + Returns zero if is less than three. + + + + Gets the sample kurtosis. + + The sample kurtosis. + Returns zero if is less than four. + + + + Gets the maximum sample value. + + The maximum sample value. + + + + Gets the minimum sample value. + + The minimum sample value. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of data values. + + A sequence of datapoints. + + + + Computes descriptive statistics from a stream of nullable data values. + + A sequence of datapoints. + + + + Internal use. Method use for setting the statistics. + + For setting Mean. + For setting Variance. + For setting Skewness. + For setting Kurtosis. + For setting Minimum. + For setting Maximum. + For setting Count. + + + + A consists of a series of s, + each representing a region limited by a lower bound (exclusive) and an upper bound (inclusive). + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + This IComparer performs comparisons between a point and a bucket. + + + + + Compares a point and a bucket. The point will be encapsulated in a bucket with width 0. + + The first bucket to compare. + The second bucket to compare. + -1 when the point is less than this bucket, 0 when it is in this bucket and 1 otherwise. + + + + Lower Bound of the Bucket. + + + + + Upper Bound of the Bucket. + + + + + The number of datapoints in the bucket. + + + Value may be NaN if this was constructed as a argument. + + + + + Initializes a new instance of the Bucket class. + + + + + Constructs a Bucket that can be used as an argument for a + like when performing a Binary search. + + Value to look for + + + + Creates a copy of the Bucket with the lowerbound, upperbound and counts exactly equal. + + A cloned Bucket object. + + + + Width of the Bucket. + + + + + True if this is a single point argument for + when performing a Binary search. + + + + + Default comparer. + + + + + This method check whether a point is contained within this bucket. + + The point to check. + + 0 if the point falls within the bucket boundaries; + -1 if the point is smaller than the bucket, + +1 if the point is larger than the bucket. + + + + Comparison of two disjoint buckets. The buckets cannot be overlapping. + + + 0 if UpperBound and LowerBound are bit-for-bit equal + 1 if This bucket is lower that the compared bucket + -1 otherwise + + + + + Checks whether two Buckets are equal. + + + UpperBound and LowerBound are compared bit-for-bit, but This method tolerates a + difference in Count given by . + + + + + Provides a hash code for this bucket. + + + + + Formats a human-readable string for this bucket. + + + + + A class which computes histograms of data. + + + + + Contains all the Buckets of the Histogram. + + + + + Indicates whether the elements of buckets are currently sorted. + + + + + Initializes a new instance of the Histogram class. 
+ + + + + Constructs a Histogram with a specific number of equally sized buckets. The upper and lower bound of the histogram + will be set to the smallest and largest datapoint. + + The datasequence to build a histogram on. + The number of buckets to use. + + + + Constructs a Histogram with a specific number of equally sized buckets. + + The datasequence to build a histogram on. + The number of buckets to use. + The histogram lower bound. + The histogram upper bound. + + + + Add one data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The datapoint which we want to add. + + + + Add a sequence of data point to the histogram. If the datapoint falls outside the range of the histogram, + the lowerbound or upperbound will automatically adapt. + + The sequence of datapoints which we want to add. + + + + Adds a Bucket to the Histogram. + + + + + Sort the buckets if needed. + + + + + Returns the Bucket that contains the value v. + + The point to search the bucket for. + A copy of the bucket containing point . + + + + Returns the index in the Histogram of the Bucket + that contains the value v. + + The point to search the bucket index for. + The index of the bucket containing the point. + + + + Returns the lower bound of the histogram. + + + + + Returns the upper bound of the histogram. + + + + + Gets the n'th bucket. + + The index of the bucket to be returned. + A copy of the n'th bucket. + + + + Gets the number of buckets. + + + + + Gets the total number of datapoints in the histogram. + + + + + Prints the buckets contained in the . + + + + + A hybrid Monte Carlo sampler for multivariate distributions. + + + + + Number of parameters in the density function. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of different components of the + momentum. + + + + + Gets or sets the standard deviations used in the sampling of different components of the + momentum. + + When the length of pSdv is not the same as Length. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + 1 using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + When the number of burnInterval iteration is negative. 
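As a usage sketch for the Histogram and Bucket types documented above; the member names (constructor, BucketCount, indexer, bounds) follow the descriptions in this file, but treat the exact signatures as an assumption of this note.

    using System;
    using MathNet.Numerics.Statistics;

    class HistogramDemo
    {
        static void Main()
        {
            var data = new[] { 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 9.0 };

            // Four equally sized buckets spanning the smallest and largest datapoint.
            var histogram = new Histogram(data, 4);

            Console.WriteLine($"[{histogram.LowerBound}, {histogram.UpperBound}] in {histogram.BucketCount} buckets");
            for (int i = 0; i < histogram.BucketCount; i++)
            {
                var bucket = histogram[i];   // copy of the i'th bucket
                Console.WriteLine($"({bucket.LowerBound}, {bucket.UpperBound}]: {bucket.Count}");
            }
        }
    }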
+ + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the a random number generator provided by the user. + A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The components of the momentum will be sampled from a normal distribution with standard deviations + given by pSdv. This constructor will set the burn interval, the method used for + numerical differentiation and the random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviations of the normal distributions that are used to sample + the components of the momentum. + Random number generator used for sampling the momentum. + The method used for numerical differentiation. + When the number of burnInterval iteration is negative. + When the length of pSdv is not the same as x0. + + + + Initialize parameters. + + The current location of the sampler. + + + + Checking that the location and the momentum are of the same dimension and that each component is positive. + + The standard deviations used for sampling the momentum. + When the length of pSdv is not the same as Length or if any + component is negative. + When pSdv is null. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the gradient. Uses a simple three point estimation. + + Function which the gradient is to be evaluated. + The location where the gradient is to be evaluated. + The gradient of the function at the point x. + + + + The Hybrid (also called Hamiltonian) Monte Carlo produces samples from distribution P using a set + of Hamiltonian equations to guide the sampling process. It uses the negative of the log density as + a potential energy, and a randomly generated momentum to set up a Hamiltonian system, which is then used + to sample the distribution. This can result in a faster convergence than the random walk Metropolis sampler + (). + + The type of samples this sampler produces. + + + + The delegate type that defines a derivative evaluated at a certain point. + + Function to be differentiated. + Value where the derivative is computed. + + + + Evaluates the energy function of the target distribution. + + + + + The current location of the sampler. + + + + + The number of burn iterations between two samples. + + + + + The size of each step in the Hamiltonian equation. 
+ + + + + The number of iterations in the Hamiltonian equation. + + + + + The algorithm used for differentiation. + + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the number of iterations in the Hamiltonian equation. + + When frogleap steps is negative or zero. + + + + Gets or sets the size of each step in the Hamiltonian equation. + + When step size is negative or zero. + + + + Constructs a new Hybrid Monte Carlo sampler. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + Random number generator used for sampling the momentum. + The method used for differentiation. + When the number of burnInterval iteration is negative. + When either x0, pdfLnP or diff is null. + + + + Returns a sample from the distribution P. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Method used to update the sample location. Used in the end of the loop. + + The old energy. + The old gradient/derivative of the energy. + The new sample. + The new gradient/derivative of the energy. + The new energy. + The difference between the old Hamiltonian and new Hamiltonian. Use to determine + if an update should take place. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Method for doing dot product. + + First vector/scalar in the product. + Second vector/scalar in the product. + + + + Method for adding, multiply the second vector/scalar by factor and then + add it to the first vector/scalar. + + First vector/scalar. + Scalar factor multiplying by the second vector/scalar. + Second vector/scalar. + + + + Multiplying the second vector/scalar by factor and then subtract it from + the first vector/scalar. + + First vector/scalar. + Scalar factor to be multiplied to the second vector/scalar. + Second vector/scalar. + + + + Method for sampling a random momentum. + + Momentum to be randomized. + + + + The Hamiltonian equations that is used to produce the new sample. + + + + + Method to compute the Hamiltonian used in the method. + + The momentum. + The energy. + Hamiltonian=E+p.p/2 + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than or equal to zero. + Throws when value is negative. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than to zero. + Throws when value is negative or zero. + + + + Method to check and set a quantity to a non-negative value. + + Proposed value to be checked. + Returns value if it is greater than zero. + Throws when value is negative or zero. + + + + Provides utilities to analysis the convergence of a set of samples from + a . + + + + + Computes the auto correlations of a series evaluated by a function f. + + The series for computing the auto correlation. + The lag in the series + The function used to evaluate the series. + The auto correlation. + Throws if lag is zero or if lag is + greater than or equal to the length of Series. + + + + Computes the effective size of the sample when evaluated by a function f. + + The samples. + The function use for evaluating the series. 
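The hybrid (Hamiltonian) Monte Carlo samplers documented above treat the negative log density as a potential energy and use the Hamiltonian H = E + p.p/2; the core move is the leapfrog ("frog leap") integrator with a given step size and step count. The following is a hypothetical one-dimensional sketch of that integration step, not the library's implementation.

    using System;

    static class LeapfrogSketch
    {
        // One leapfrog trajectory for potential energy E(x) = -log p(x). Here the target
        // is a standard normal, so E(x) = x*x/2 and dE/dx = x. stepSize and steps mirror
        // the documented frog-leap parameters.
        public static (double x, double p) Leapfrog(double x, double p, double stepSize, int steps)
        {
            p -= 0.5 * stepSize * x;          // half step for momentum (gradient of E is x)
            for (int i = 0; i < steps; i++)
            {
                x += stepSize * p;            // full step for position
                if (i < steps - 1)
                    p -= stepSize * x;        // full step for momentum, except at the end
            }
            p -= 0.5 * stepSize * x;          // final half step for momentum
            return (x, p);
        }

        static void Main()
        {
            // H = E + p*p/2 should be approximately conserved along the trajectory.
            double x0 = 1.0, p0 = 0.5;
            var (x1, p1) = Leapfrog(x0, p0, 0.1, 20);
            Console.WriteLine(x0 * x0 / 2 + p0 * p0 / 2); // 0.625
            Console.WriteLine(x1 * x1 / 2 + p1 * p1 / 2); // close to 0.625
        }
    }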
+ The effective size when auto correlation is taken into account. + + + + A method which samples datapoints from a proposal distribution. The implementation of this sampler + is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it doesn't take any parameters; it samples random + variables from the whole domain. + + The type of the datapoints. + A sample from the proposal distribution. + + + + A method which samples datapoints from a proposal distribution given an initial sample. The implementation + of this sampler is stateless: no variables are saved between two calls to Sample. This proposal is different from + in that it samples locally around an initial point. In other words, it + makes a small local move rather than producing a global sample from the proposal. + + The type of the datapoints. + The initial sample. + A sample from the proposal distribution. + + + + A function which evaluates a density. + + The type of data the distribution is over. + The sample we want to evaluate the density for. + + + + A function which evaluates a log density. + + The type of data the distribution is over. + The sample we want to evaluate the log density for. + + + + A function which evaluates the log of a transition kernel probability. + + The type for the space over which this transition kernel is defined. + The new state in the transition. + The previous state in the transition. + The log probability of the transition. + + + + The interface which every sampler must implement. + + The type of samples this sampler produces. + + + + The random number generator for this class. + + + + + Keeps track of the number of accepted samples. + + + + + Keeps track of the number of calls to the proposal sampler. + + + + + Initializes a new instance of the class. + + Thread safe instances are two and half times slower than non-thread + safe classes. + + + + Gets or sets the random number generator. + + When the random number generator is null. + + + + Returns one sample. + + + + + Returns a number of samples. + + The number of samples we want. + An array of samples. + + + + Gets the acceptance rate of the sampler. + + + + + Metropolis-Hastings sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis-Hastings sampling doesn't require that the + proposal distribution Q is symmetric in comparison to . It does need to + be able to evaluate the proposal sampler's log density though. All densities are required to be in log space. + + The Metropolis-Hastings sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the target distribution. + + + + + Evaluates the log transition probability for the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis-Hastings sampler using the default random number generator. This + constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + The log transition probability for the proposal distribution. + A method that samples from the proposal distribution. 
+ The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Metropolis sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P. Metropolis sampling requires that the proposal + distribution Q is symmetric. All densities are required to be in log space. + + The Metropolis sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + The type of samples this sampler produces. + + + + Evaluates the log density function of the sampling distribution. + + + + + A function which samples from a proposal distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + Constructs a new Metropolis sampler using the default random number generator. + + The initial sample. + The log density of the distribution we want to sample from. + A method that samples from the symmetric proposal distribution. + The number of iterations in between returning samples. + When the number of burnInterval iteration is negative. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Rejection sampling produces samples from distribition P by sampling from a proposal distribution Q + and accepting/rejecting based on the density of P and Q. The density of P and Q don't need to + to be normalized, but we do need that for each x, P(x) < Q(x). + + The type of samples this sampler produces. + + + + Evaluates the density function of the sampling distribution. + + + + + Evaluates the density function of the proposal distribution. + + + + + A function which samples from a proposal distribution. + + + + + Constructs a new rejection sampler using the default random number generator. + + The density of the distribution we want to sample from. + The density of the proposal distribution. + A method that samples from the proposal distribution. + + + + Returns a sample from the distribution P. + + When the algorithms detects that the proposal + distribution doesn't upper bound the target distribution. + + + + A hybrid Monte Carlo sampler for univariate distributions. + + + + + Distribution to sample momentum from. + + + + + Standard deviations used in the sampling of the + momentum. + + + + + Gets or sets the standard deviation used in the sampling of the + momentum. + + When standard deviation is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using the default random + number generator. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. 
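[Editor's note] The rejection sampler documented above requires that the (unnormalized) proposal density Q dominates the target density P, i.e. P(x) < Q(x) everywhere, and accepts a candidate drawn from Q with probability P(x)/Q(x). A compact sketch of that loop, with pdfP, pdfQ and sampleQ as illustrative placeholders rather than the library's delegate types:

    using System;

    static class RejectionSketch
    {
        public static double Sample(Func<double, double> pdfP, Func<double, double> pdfQ,
            Func<double> sampleQ, Random rng)
        {
            while (true)
            {
                double x = sampleQ();
                if (pdfP(x) > pdfQ(x))   // mirrors the documented exception condition
                    throw new ArgumentException("Proposal density does not upper bound the target.");
                if (rng.NextDouble() * pdfQ(x) < pdfP(x))   // accept with probability P(x)/Q(x)
                    return x;
            }
        }
    }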
+ The standard deviation of the normal distribution that is used to sample + the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a univariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + specified by pSdv using a random + number generator provided by the user. A three point estimation will be used for differentiation. + This constructor will set the burn interval. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + Random number generator used to sample the momentum. + When the number of burnInterval iteration is negative. + + + + Constructs a new Hybrid Monte Carlo sampler for a multivariate probability distribution. + The momentum will be sampled from a normal distribution with standard deviation + given by pSdv using a random + number generator provided by the user. This constructor will set both the burn interval and the method used for + numerical differentiation. + + The initial sample. + The log density of the distribution we want to sample from. + Number frogleap simulation steps. + Size of the frogleap simulation steps. + The number of iterations in between returning samples. + The standard deviation of the normal distribution that is used to sample + the momentum. + The method used for numerical differentiation. + Random number generator used for sampling the momentum. + When the number of burnInterval iteration is negative. + + + + Use for copying objects in the Burn method. + + The source of copying. + A copy of the source object. + + + + Use for creating temporary objects in the Burn method. + + An object of type T. + + + + + + + + + + + + + Samples the momentum from a normal distribution. + + The momentum to be randomized. + + + + The default method used for computing the derivative. Uses a simple three point estimation. + + Function for which the derivative is to be evaluated. + The location where the derivative is to be evaluated. + The derivative of the function at the point x. + + + + Slice sampling produces samples from distribition P by uniformly sampling from under the pdf of P using + a technique described in "Slice Sampling", R. Neal, 2003. All densities are required to be in log space. + + The slice sampler is a stateful sampler. It keeps track of where it currently is in the domain + of the distribution P. + + + + + Evaluates the log density function of the target distribution. + + + + + The current location of the sampler. + + + + + The log density at the current location. + + + + + The number of burn iterations between two samples. + + + + + The scale of the slice sampler. + + + + + Constructs a new Slice sampler using the default random + number generator. The burn interval will be set to 0. + + The initial sample. + The density of the distribution we want to sample from. + The scale factor of the slice sampler. + When the scale of the slice sampler is not positive. + + + + Constructs a new slice sampler using the default random number generator. It + will set the number of burnInterval iterations and run a burnInterval phase. + + The initial sample. + The density of the distribution we want to sample from. + The number of iterations in between returning samples. 
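[Editor's note] The univariate Hybrid Monte Carlo constructors above fall back to "a simple three point estimation" for numerical differentiation when no derivative is supplied. A sketch of such a central-difference estimate; the step size h here is an assumed illustration, not necessarily the library's internal choice.

    using System;

    static class ThreePointDerivative
    {
        // Central (three point) difference: f'(x) ~ (f(x + h) - f(x - h)) / (2h).
        public static double Estimate(Func<double, double> f, double x, double h = 1e-5)
        {
            return (f(x + h) - f(x - h)) / (2.0 * h);
        }
    }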
+ The scale factor of the slice sampler. + When the number of burnInterval iteration is negative. + When the scale of the slice sampler is not positive. + + + + Gets or sets the number of iterations in between returning samples. + + When burn interval is negative. + + + + Gets or sets the scale of the slice sampler. + + + + + This method runs the sampler for a number of iterations without returning a sample + + + + + Returns a sample from the distribution P. + + + + + Running statistics over a window of data, allows updating by adding values. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + Replace ties with their mean (non-integer ranks). Default. + + + Replace ties with their minimum (typical sports ranking). + + + Replace ties with their maximum. + + + Permutation with increasing values at each index of ties. + + + + Running statistics accumulator, allows updating by adding values + or by combining two accumulators. + + + This type declares a DataContract for out of the box ephemeral serialization + with engines like DataContractSerializer, Protocol Buffers and FsPickler, + but does not guarantee any compatibility between versions. + It is not recommended to rely on this mechanism for durable persistance. + + + + + Gets the total number of samples. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. 
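[Editor's note] The MovingStatistics / RunningStatistics entries in this stretch describe accumulators that are updated by pushing observed values and expose count, min, max, mean, and both sample (N-1) and population (N) variance and standard deviation. A hedged usage sketch, assuming the MathNet.Numerics.Statistics.RunningStatistics type exposes Push/PushRange and the documented properties (names as I understand them for MathNet.Numerics 3.x; verify against the referenced assembly):

    using System;
    using MathNet.Numerics.Statistics;

    static class RunningStatsDemo
    {
        public static void Main()
        {
            var stats = new RunningStatistics();
            stats.PushRange(new[] { 1.0, 2.0, 4.0, 8.0 });  // add a sequence of samples
            stats.Push(16.0);                               // add one more observation

            Console.WriteLine("n = {0}, mean = {1}", stats.Count, stats.Mean);
            Console.WriteLine("sample sd = {0}, population sd = {1}",
                stats.StandardDeviation, stats.PopulationStandardDeviation);
        }
    }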
+ + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Evaluates the population skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + + + + Evaluates the population kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + + + + Update the running statistics by adding another observed sample (in-place). + + + + + Update the running statistics by adding a sequence of observed sample (in-place). + + + + + Create a new running statistics over the combined samples of two existing running statistics. + + + + + Statistics operating on an array already sorted ascendingly. + + + + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. 
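[Editor's note] The SortedArrayStatistics entries above all operate on an array already sorted ascendingly: median, percentiles, quartiles, inter-quartile range, five-number summary and order statistics. A hedged usage sketch, assuming the static MathNet.Numerics.Statistics.SortedArrayStatistics class exposes the members named in the documentation:

    using System;
    using MathNet.Numerics.Statistics;

    static class SortedStatsDemo
    {
        public static void Main()
        {
            var data = new[] { 3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0, 6.0 };
            Array.Sort(data);   // these routines assume ascending order

            Console.WriteLine("median = {0}", SortedArrayStatistics.Median(data));
            Console.WriteLine("IQR    = {0}", SortedArrayStatistics.InterquartileRange(data));
            Console.WriteLine("q(0.9) = {0}", SortedArrayStatistics.Quantile(data, 0.9));
            Console.WriteLine("2nd order statistic = {0}",
                SortedArrayStatistics.OrderStatistic(data, 2));   // one-based order
        }
    }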
+ + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Returns the smallest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the largest value from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + + + + Returns the order statistic (order 1..N) from the sorted data array (ascending). + + Sample array, must be sorted ascendingly. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Estimates the median value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the p-Percentile value from the sorted data array (ascending). + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the first quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the third quartile value from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). 
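[Editor's note] The remarks above pin down the default quantile definition: R-8 / SciPy-(1/3, 1/3), linear interpolation of the approximate medians of the order statistics, using x1 when tau < (2/3)/(N + 1/3) and xN when tau >= (N - 1/3)/(N + 1/3). A small sketch of that formula on a sorted array, illustrative only and not the library's code:

    using System;

    static class R8Quantile
    {
        // R-8 quantile on data sorted ascendingly: h = (N + 1/3) * tau + 1/3,
        // then linear interpolation between adjacent order statistics.
        public static double Estimate(double[] sorted, double tau)
        {
            int n = sorted.Length;
            if (tau < (2.0 / 3.0) / (n + 1.0 / 3.0)) return sorted[0];
            if (tau >= (n - 1.0 / 3.0) / (n + 1.0 / 3.0)) return sorted[n - 1];

            double h = (n + 1.0 / 3.0) * tau + 1.0 / 3.0;
            int k = (int)Math.Floor(h);               // one-based index of the lower order statistic
            return sorted[k - 1] + (h - k) * (sorted[k] - sorted[k - 1]);
        }
    }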
+ + Sample array, must be sorted ascendingly. + + + + Estimates the inter-quartile range from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the sorted data array (ascending). + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + + R-8, SciPy-(1/3,1/3): + Linear interpolation of the approximate medians for order statistics. + When tau < (2/3) / (N + 1/3), use x1. When tau >= (N - 1/3) / (N + 1/3), use xN. + + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile defintion can be specified + by 4 parameters a, b, c and d, consistent with Mathematica. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + a-parameter + b-parameter + c-parameter + d-parameter + + + + Estimates the tau-th quantile from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + Sample array, must be sorted ascendingly. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the sorted data array (ascending). + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the quantile tau from the sorted data array (ascending). + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the sorted data array (ascending). + The rank definition can be specified to be compatible + with an existing system. + + + + + Extension methods to return basic statistics on set of data. + + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. 
+ Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the maximum absolute value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The maximum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the minimum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Returns the maximum magnitude and phase value in the sample data. + Returns NaN if data is empty or if any entry is NaN. + + The sample data. + The minimum value in the sample data. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the sample mean, an estimate of the population mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + The mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the geometric mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the geometric mean of. + The geometric mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Evaluates the harmonic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the harmonic mean of. + The harmonic mean of the sample. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
+ Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the variance from the provided full population. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subsetr. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + + + + Evaluates the standard deviation from the provided full population. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population skewness from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness from the full population. 
+ Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + + The full population data. + + + + Evaluates the skewness from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population kurtosis from the provided samples. + Uses a normalizer (Bessel's correction; type 2). + Returns NaN if data has less than four entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + + The full population data. + + + + Evaluates the kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + Returns NaN if data has less than three entries or if any entry is NaN. + Null-entries are ignored. + + The full population data. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population variance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for variance if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the sample mean and the unbiased population standard deviation from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or if any entry is NaN and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + The data to calculate the mean of. + The mean of the sample. + + + + Estimates the unbiased population skewness and kurtosis from the provided samples in a single pass. + Uses a normalizer (Bessel's correction; type 2). + + A subset of samples, sampled from the full population. + + + + Evaluates the skewness and kurtosis from the full population. + Does not use a normalizer and would thus be biased if applied to a subset (type 1). + + The full population data. 
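[Editor's note] The distinction repeated throughout the entries above is between the sample estimators, which divide by N-1 (Bessel's correction), and the population evaluators, which divide by N and are therefore biased when applied to a subset. A compact sketch making that difference explicit, including the documented NaN conventions; this is illustrative, not the library's implementation.

    using System;
    using System.Linq;

    static class VarianceSketch
    {
        // Unbiased sample variance (N - 1 normalizer, Bessel's correction).
        public static double SampleVariance(double[] data)
        {
            if (data.Length < 2) return double.NaN;   // matches the documented NaN convention
            double mean = data.Average();
            return data.Sum(x => (x - mean) * (x - mean)) / (data.Length - 1);
        }

        // Population variance (N normalizer); biased if applied to a subset.
        public static double PopulationVariance(double[] data)
        {
            if (data.Length == 0) return double.NaN;
            double mean = data.Average();
            return data.Sum(x => (x - mean) * (x - mean)) / data.Length;
        }
    }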
+ + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Estimates the unbiased population covariance from the provided samples. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + Null-entries are ignored. + + A subset of samples, sampled from the full population. + A subset of samples, sampled from the full population. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + The full population data. + The full population data. + + + + Evaluates the population covariance from the provided full populations. + On a dataset of size N will use an N normalize and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The full population data. + The full population data. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + + The data to calculate the RMS of. + + + + Evaluates the root mean square (RMS) also known as quadratic mean. + Returns NaN if data is empty or if any entry is NaN. + Null-entries are ignored. + + The data to calculate the mean of. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the sample median from the provided samples (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. 
+ The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the tau-th quantile from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile definition, to choose what product/definition it should be consistent with + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. 
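[Editor's note] The extension methods above expose the median, tau-th quantiles and integer percentiles directly on sample sequences, with QuantileCustom selecting a specific quantile definition. A hedged usage sketch, assuming the MathNet.Numerics.Statistics.Statistics extension methods carry the signatures the documentation implies (verify against the referenced MathNet.Numerics 3.16 assembly):

    using System;
    using MathNet.Numerics.Statistics;

    static class QuantileDemo
    {
        public static void Main()
        {
            var samples = new[] { 2.0, 7.0, 1.0, 9.0, 3.0, 5.0 };

            Console.WriteLine("median          = {0}", samples.Median());
            Console.WriteLine("0.25-quantile   = {0}", samples.Quantile(0.25));
            Console.WriteLine("90th percentile = {0}", samples.Percentile(90));
        }
    }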
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + Percentile selector, between 0 and 100 (inclusive). + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the p-Percentile value from the provided samples. + If a non-integer Percentile is needed, use Quantile instead. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the first quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the third quartile value from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates the inter-quartile range from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. + Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Estimates {min, lower-quantile, median, upper-quantile, max} from the provided samples. 
+ Approximately median-unbiased regardless of the sample distribution (R8). + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + One-based order of the statistic, must be between 1 and N (inclusive). + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Returns the order statistic (order 1..N) from the provided samples. + + The data sample sequence. + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Evaluates the rank of each entry of the provided samples. + The rank definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Quantile value. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. 
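[Editor's note] The Ranks entries above, together with the tie-handling options listed earlier in this file (mean of the tied ranks by default, or minimum, maximum, or an increasing permutation), describe fractional ranking. A small sketch of the default behaviour, replacing tied entries with the mean of the ranks they span; illustrative only, not the library's code.

    using System;
    using System.Linq;

    static class FractionalRanks
    {
        // Returns one-based ranks; ties receive the mean of the ranks they span.
        public static double[] Compute(double[] data)
        {
            var order = Enumerable.Range(0, data.Length)
                                  .OrderBy(i => data[i])
                                  .ToArray();                 // indices sorted by value
            var ranks = new double[data.Length];
            int i = 0;
            while (i < data.Length)
            {
                int j = i;
                while (j + 1 < data.Length && data[order[j + 1]] == data[order[i]]) j++;
                double meanRank = (i + j) / 2.0 + 1.0;        // mean of one-based ranks i+1 .. j+1
                for (int k = i; k <= j; k++) ranks[order[k]] = meanRank;
                i = j + 1;
            }
            return ranks;
        }
    }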
+ Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the quantile tau from the provided samples. + The tau-th quantile is the data value where the cumulative distribution + function crosses tau. The quantile definition can be specified to be compatible + with an existing system. + + The data sample sequence. + Rank definition, to choose how ties should be handled and what product/definition it should be consistent with + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + The value where to estimate the CDF at. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical cumulative distribution function (CDF) at x from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + Quantile selector, between 0.0 and 1.0 (inclusive). + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Estimates the empirical inverse CDF at tau from the provided samples. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + + The data sample sequence. + + + + Calculates the entropy of a stream of double values in bits. + Returns NaN if any of the values in the stream are NaN. + Null-entries are ignored. + + The data sample sequence. + + + + Evaluates the sample mean over a moving window, for each samples. + Returns NaN if no data is empty or if any entry is NaN. + + The sample stream to calculate the mean of. + The number of last samples to consider. + + + + Statistics operating on an IEnumerable in a single pass, without keeping the full data in memory. + Can be used in a streaming way, e.g. on large datasets not fitting into memory. + + + + + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. 
+ + Sample stream, no sorting is assumed. + + + + Returns the largest value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the smallest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Returns the largest absolute value from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the geometric mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the harmonic mean of the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). 
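[Editor's note] The streaming entries above compute the mean and the unbiased variance "in a single pass without memoization", which is exactly the shape of a Welford-style update. A sketch of such a single-pass accumulation; the library's internal algorithm is not shown in these comments, so this is illustrative only.

    using System;
    using System.Collections.Generic;

    static class OnePassStats
    {
        // Single pass over the stream: returns (mean, unbiased variance with N-1 normalizer).
        public static Tuple<double, double> MeanVariance(IEnumerable<double> stream)
        {
            long n = 0;
            double mean = 0.0, m2 = 0.0;
            foreach (var x in stream)
            {
                n++;
                double delta = x - mean;
                mean += delta / n;
                m2 += delta * (x - mean);     // Welford update of the sum of squared deviations
            }
            if (n == 0) return Tuple.Create(double.NaN, double.NaN);
            return Tuple.Create(mean, n < 2 ? double.NaN : m2 / (n - 1));
        }
    }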
+ Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population variance from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Evaluates the population standard deviation from the full population provided as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population variance from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for variance if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. 
+ + + + Estimates the arithmetic sample mean and the unbiased population standard deviation from the provided samples as enumerable sequence, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN for mean if data is empty or any entry is NaN, and NaN for standard deviation if data has less than two entries or if any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Estimates the unbiased population covariance from the provided two sample enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N-1 normalizer (Bessel's correction). + Returns NaN if data has less than two entries or if any entry is NaN. + + First sample stream. + Second sample stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Evaluates the population covariance from the full population provided as two enumerable sequences, in a single pass without memoization. + On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset. + Returns NaN if data is empty or if any entry is NaN. + + First population stream. + Second population stream. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Estimates the root mean square (RMS) also known as quadratic mean from the enumerable, in a single pass without memoization. + Returns NaN if data is empty or any entry is NaN. + + Sample stream, no sorting is assumed. + + + + Calculates the entropy of a stream of double values. + Returns NaN if any of the values in the stream are NaN. + + The input stream to evaluate. + + + + + Used to simplify parallel code, particularly between the .NET 4.0 and Silverlight Code. + + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The body to be invoked for each iteration range. + + + + Executes a for loop in which iterations may run in parallel. + + The start index, inclusive. + The end index, exclusive. + The partition size for splitting work into smaller pieces. + The body to be invoked for each iteration range. + + + + Executes each of the provided actions inside a discrete, asynchronous task. + + An array of actions to execute. + The actions array contains a null element. + At least one invocation of the actions threw an exception. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. 
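[Editor's note] The CommonParallel entries above describe range-based parallel loops (the body is invoked once per iteration range, optionally with an explicit partition size) plus Invoke for independent actions. Rather than assert CommonParallel's exact public signature, the sketch below shows the same range-partitioned pattern with the BCL's Partitioner and Parallel.ForEach, which is what the documented behaviour corresponds to:

    using System;
    using System.Collections.Concurrent;
    using System.Threading.Tasks;

    static class ParallelRangeDemo
    {
        public static void Main()
        {
            var squares = new double[1000];

            // Same shape as the documented CommonParallel.For: each body
            // invocation handles a [from, to) slice of the index space.
            Parallel.ForEach(Partitioner.Create(0, squares.Length), range =>
            {
                for (int i = range.Item1; i < range.Item2; i++)
                    squares[i] = (double)i * i;
            });

            Console.WriteLine(squares[999]);   // 998001
        }
    }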
+ The function to select the item of selection from the subsets. + The selected value. + + + + Selects an item (such as Max or Min). + + Starting index of the loop. + Ending index of the loop + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Selects an item (such as Max or Min). + + The array to iterate over. + The function to select items over a subset. + The function to select the item of selection from the subsets. + Default result of the reduce function on an empty set. + The selected value. + + + + Double-precision trigonometry toolkit. + + + + + Constant to convert a degree to grad. + + + + + Converts a degree (360-periodic) angle to a grad (400-periodic) angle. + + The degree to convert. + The converted grad angle. + + + + Converts a degree (360-periodic) angle to a radian (2*Pi-periodic) angle. + + The degree to convert. + The converted radian angle. + + + + Converts a grad (400-periodic) angle to a degree (360-periodic) angle. + + The grad to convert. + The converted degree. + + + + Converts a grad (400-periodic) angle to a radian (2*Pi-periodic) angle. + + The grad to convert. + The converted radian. + + + + Converts a radian (2*Pi-periodic) angle to a degree (360-periodic) angle. + + The radian to convert. + The converted degree. + + + + Converts a radian (2*Pi-periodic) angle to a grad (400-periodic) angle. + + The radian to convert. + The converted grad. + + + + Normalized Sinc function. sinc(x) = sin(pi*x)/(pi*x). + + + + + Trigonometric Sine of an angle in radian, or opposite / hypotenuse. + + The angle in radian. + The sine of the radian angle. + + + + Trigonometric Sine of a Complex number. + + The complex value. + The sine of the complex number. + + + + Trigonometric Cosine of an angle in radian, or adjacent / hypotenuse. + + The angle in radian. + The cosine of an angle in radian. + + + + Trigonometric Cosine of a Complex number. + + The complex value. + The cosine of a complex number. + + + + Trigonometric Tangent of an angle in radian, or opposite / adjacent. + + The angle in radian. + The tangent of the radian angle. + + + + Trigonometric Tangent of a Complex number. + + The complex value. + The tangent of the complex number. + + + + Trigonometric Cotangent of an angle in radian, or adjacent / opposite. Reciprocal of the tangent. + + The angle in radian. + The cotangent of an angle in radian. + + + + Trigonometric Cotangent of a Complex number. + + The complex value. + The cotangent of the complex number. + + + + Trigonometric Secant of an angle in radian, or hypotenuse / adjacent. Reciprocal of the cosine. + + The angle in radian. + The secant of the radian angle. + + + + Trigonometric Secant of a Complex number. + + The complex value. + The secant of the complex number. + + + + Trigonometric Cosecant of an angle in radian, or hypotenuse / opposite. Reciprocal of the sine. + + The angle in radian. + Cosecant of an angle in radian. + + + + Trigonometric Cosecant of a Complex number. + + The complex value. + The cosecant of a complex number. + + + + Trigonometric principal Arc Sine in radian + + The opposite for a unit hypotenuse (i.e. opposite / hyptenuse). + The angle in radian. + + + + Trigonometric principal Arc Sine of this Complex number. + + The complex value. + The arc sine of a complex number. + + + + Trigonometric principal Arc Cosine in radian + + The adjacent for a unit hypotenuse (i.e. adjacent / hypotenuse). 
+ The angle in radian. + + + + Trigonometric principal Arc Cosine of this Complex number. + + The complex value. + The arc cosine of a complex number. + + + + Trigonometric principal Arc Tangent in radian + + The opposite for a unit adjacent (i.e. opposite / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Tangent of this Complex number. + + The complex value. + The arc tangent of a complex number. + + + + Trigonometric principal Arc Cotangent in radian + + The adjacent for a unit opposite (i.e. adjacent / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cotangent of this Complex number. + + The complex value. + The arc cotangent of a complex number. + + + + Trigonometric principal Arc Secant in radian + + The hypotenuse for a unit adjacent (i.e. hypotenuse / adjacent). + The angle in radian. + + + + Trigonometric principal Arc Secant of this Complex number. + + The complex value. + The arc secant of a complex number. + + + + Trigonometric principal Arc Cosecant in radian + + The hypotenuse for a unit opposite (i.e. hypotenuse / opposite). + The angle in radian. + + + + Trigonometric principal Arc Cosecant of this Complex number. + + The complex value. + The arc cosecant of a complex number. + + + + Hyperbolic Sine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic sine of the angle. + + + + Hyperbolic Sine of a Complex number. + + The complex value. + The hyperbolic sine of a complex number. + + + + Hyperbolic Cosine + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic Cosine of the angle. + + + + Hyperbolic Cosine of a Complex number. + + The complex value. + The hyperbolic cosine of a complex number. + + + + Hyperbolic Tangent in radian + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic tangent of the angle. + + + + Hyperbolic Tangent of a Complex number. + + The complex value. + The hyperbolic tangent of a complex number. + + + + Hyperbolic Cotangent + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cotangent of the angle. + + + + Hyperbolic Cotangent of a Complex number. + + The complex value. + The hyperbolic cotangent of a complex number. + + + + Hyperbolic Secant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic secant of the angle. + + + + Hyperbolic Secant of a Complex number. + + The complex value. + The hyperbolic secant of a complex number. + + + + Hyperbolic Cosecant + + The hyperbolic angle, i.e. the area of the hyperbolic sector. + The hyperbolic cosecant of the angle. + + + + Hyperbolic Cosecant of a Complex number. + + The complex value. + The hyperbolic cosecant of a complex number. + + + + Hyperbolic Area Sine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Sine of this Complex number. + + The complex value. + The hyperbolic arc sine of a complex number. + + + + Hyperbolic Area Cosine + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosine of this Complex number. + + The complex value. + The hyperbolic arc cosine of a complex number. + + + + Hyperbolic Area Tangent + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Tangent of this Complex number. + + The complex value. + The hyperbolic arc tangent of a complex number. + + + + Hyperbolic Area Cotangent + + The real value. + The hyperbolic angle, i.e. 
the area of its hyperbolic sector. + + + + Hyperbolic Area Cotangent of this Complex number. + + The complex value. + The hyperbolic arc cotangent of a complex number. + + + + Hyperbolic Area Secant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Secant of this Complex number. + + The complex value. + The hyperbolic arc secant of a complex number. + + + + Hyperbolic Area Cosecant + + The real value. + The hyperbolic angle, i.e. the area of its hyperbolic sector. + + + + Hyperbolic Area Cosecant of this Complex number. + + The complex value. + The hyperbolic arc cosecant of a complex number. + + + + Hamming window. Named after Richard Hamming. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hamming window. Named after Richard Hamming. + Periodic version, useful e.g. for FFT purposes. + + + + + Hann window. Named after Julius von Hann. + Symmetric version, useful e.g. for filter design purposes. + + + + + Hann window. Named after Julius von Hann. + Periodic version, useful e.g. for FFT purposes. + + + + + Cosine window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Cosine window. + Periodic version, useful e.g. for FFT purposes. + + + + + Lanczos window. + Symmetric version, useful e.g. for filter design purposes. + + + + + Lanczos window. + Periodic version, useful e.g. for FFT purposes. + + + + + Gauss window. + + + + + Blackman window. + + + + + Blackman-Harris window. + + + + + Blackman-Nuttall window. + + + + + Bartlett window. + + + + + Bartlett-Hann window. + + + + + Nuttall window. + + + + + Flat top window. + + + + + Uniform rectangular (dirichlet) window. + + + + + Triangular window. + + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized string similar to The accuracy couldn't be reached with the specified number of iterations.. + + + + + Looks up a localized string similar to The array arguments must have the same length.. + + + + + Looks up a localized string similar to The given array has the wrong length. Should be {0}.. + + + + + Looks up a localized string similar to The argument must be between 0 and 1.. + + + + + Looks up a localized string similar to Value cannot be in the range -1 < x < 1.. + + + + + Looks up a localized string similar to Value must be even.. + + + + + Looks up a localized string similar to The histogram does not contain the value.. + + + + + Looks up a localized string similar to Value is expected to be between {0} and {1} (including {0} and {1}).. + + + + + Looks up a localized string similar to At least one item of {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be greater than or equal to one.. + + + + + Looks up a localized string similar to Matrix dimensions must agree.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: {0}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}.. + + + + + Looks up a localized string similar to Matrix dimensions must agree: op1 is {0}, op2 is {1}, op3 is {2}.. + + + + + Looks up a localized string similar to The requested matrix does not exist.. 
+ + + + + Looks up a localized string similar to The matrix indices must not be out of range of the given matrix.. + + + + + Looks up a localized string similar to Matrix must not be rank deficient.. + + + + + Looks up a localized string similar to Matrix must not be singular.. + + + + + Looks up a localized string similar to Matrix must be positive definite.. + + + + + Looks up a localized string similar to Matrix column dimensions must agree.. + + + + + Looks up a localized string similar to Matrix row dimensions must agree.. + + + + + Looks up a localized string similar to Matrix must have exactly one column.. + + + + + Looks up a localized string similar to Matrix must have exactly one column and row, thus have only one cell.. + + + + + Looks up a localized string similar to Matrix must have exactly one row.. + + + + + Looks up a localized string similar to Matrix must be square.. + + + + + Looks up a localized string similar to Matrix must be symmetric.. + + + + + Looks up a localized string similar to Matrix must be symmetric positive definite.. + + + + + Looks up a localized string similar to In the specified range, the exclusive maximum must be greater than the inclusive minimum.. + + + + + Looks up a localized string similar to In the specified range, the minimum is greater than maximum.. + + + + + Looks up a localized string similar to Value must be positive.. + + + + + Looks up a localized string similar to Value must neither be infinite nor NaN.. + + + + + Looks up a localized string similar to Value must not be negative (zero is ok).. + + + + + Looks up a localized string similar to {0} is a null reference (Nothing in Visual Basic).. + + + + + Looks up a localized string similar to Value must be odd.. + + + + + Looks up a localized string similar to {0} must be greater than {1}.. + + + + + Looks up a localized string similar to {0} must be greater than or equal to {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than {1}.. + + + + + Looks up a localized string similar to {0} must be smaller than or equal to {1}.. + + + + + Looks up a localized string similar to The chosen parameter set is invalid (probably some value is out of range).. + + + + + Looks up a localized string similar to The given expression does not represent a complex number.. + + + + + Looks up a localized string similar to Value must be positive (and not zero).. + + + + + Looks up a localized string similar to Size must be a Power of Two.. + + + + + Looks up a localized string similar to Size must be a Power of Two in every dimension.. + + + + + Looks up a localized string similar to The range between {0} and {1} must be less than or equal to {2}.. + + + + + Looks up a localized string similar to Arguments must be different objects.. + + + + + Looks up a localized string similar to Array must have exactly one dimension (and not be null).. + + + + + Looks up a localized string similar to Value is too large.. + + + + + Looks up a localized string similar to Value is too large for the current iteration limit.. + + + + + Looks up a localized string similar to Type mismatch.. + + + + + Looks up a localized string similar to The upper bound must be strictly larger than the lower bound.. + + + + + Looks up a localized string similar to The upper bound must be at least as large as the lower bound.. + + + + + Looks up a localized string similar to Array length must be a multiple of {0}.. + + + + + Looks up a localized string similar to All vectors must have the same dimensionality.. 
+ + + + + Looks up a localized string similar to The vector must have 3 dimensions.. + + + + + Looks up a localized string similar to The given array is too small. It must be at least {0} long.. + + + + + Looks up a localized string similar to Big endian files are not supported.. + + + + + Looks up a localized string similar to The supplied collection is empty.. + + + + + Looks up a localized string similar to Complex matrices are not supported.. + + + + + Looks up a localized string similar to An algorithm failed to converge.. + + + + + Looks up a localized string similar to The sample size must be larger than the given degrees of freedom.. + + + + + Looks up a localized string similar to This feature is not implemented yet (but is planned).. + + + + + Looks up a localized string similar to The given file doesn't exist.. + + + + + Looks up a localized string similar to Sample points should be sorted in strictly ascending order. + + + + + Looks up a localized string similar to All sample points should be unique.. + + + + + Looks up a localized string similar to Invalid parameterization for the distribution.. + + + + + Looks up a localized string similar to Invalid Left Boundary Condition.. + + + + + Looks up a localized string similar to The operation could not be performed because the accumulator is empty.. + + + + + Looks up a localized string similar to The operation could not be performed because the histogram is empty.. + + + + + Looks up a localized string similar to Not enough points in the distribution.. + + + + + Looks up a localized string similar to No Samples Provided. Preparation Required.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method.. + + + + + Looks up a localized string similar to An invalid parameter was passed to a native method, parameter number : {0}. + + + + + Looks up a localized string similar to Invalid Right Boundary Condition.. + + + + + Looks up a localized string similar to Lag must be positive. + + + + + Looks up a localized string similar to Lag must be smaller than the sample size. + + + + + Looks up a localized string similar to ddd MMM dd HH:mm:ss yyyy. + + + + + Looks up a localized string similar to Matrices can not be empty and must have at least one row and column.. + + + + + Looks up a localized string similar to The number of columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Matrix must be in sparse storage format. + + + + + Looks up a localized string similar to The number of rows of a matrix must be positive.. + + + + + Looks up a localized string similar to The number of rows or columns of a matrix must be positive.. + + + + + Looks up a localized string similar to Unable to allocate native memory.. + + + + + Looks up a localized string similar to Only 1 and 2 dimensional arrays are supported.. + + + + + Looks up a localized string similar to Data must contain at least {0} values.. + + + + + Looks up a localized string similar to Name cannot contain a space. name: {0}. + + + + + Looks up a localized string similar to {0} is not a supported type.. + + + + + Looks up a localized string similar to Algorithm experience a numerical break down + . + + + + + Looks up a localized string similar to The two arguments can't be compared (maybe they are part of a partial ordering?). + + + + + Looks up a localized string similar to The integer array does not represent a valid permutation.. 
+ + + + + Looks up a localized string similar to The sampler's proposal distribution is not upper bounding the target density.. + + + + + Looks up a localized string similar to A regression of the requested order requires at least {0} samples. Only {1} samples have been provided. . + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds.. + + + + + Looks up a localized string similar to The algorithm has failed, exceeded the number of iterations allowed or there is no root within the provided bounds. Consider to use RobustNewtonRaphson instead.. + + + + + Looks up a localized string similar to The lower and upper bounds must bracket a single root.. + + + + + Looks up a localized string similar to The algorithm ended without root in the range.. + + + + + Looks up a localized string similar to The number of rows must greater than or equal to the number of columns.. + + + + + Looks up a localized string similar to All sample vectors must have the same length. However, vectors with disagreeing length {0} and {1} have been provided. A sample with index i is given by the value at index i of each provided vector.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed.. + + + + + Looks up a localized string similar to U is singular, and the inversion could not be completed. The {0}-th diagonal element of the factor U is zero.. + + + + + Looks up a localized string similar to The singular vectors were not computed.. + + + + + Looks up a localized string similar to This special case is not supported yet (but is planned).. + + + + + Looks up a localized string similar to The given stop criterion already exist in the collection.. + + + + + Looks up a localized string similar to There is no stop criterion in the collection.. + + + + + Looks up a localized string similar to String parameter cannot be empty or null.. + + + + + Looks up a localized string similar to We only support sparse matrix with less than int.MaxValue elements.. + + + + + Looks up a localized string similar to The moment of the distribution is undefined.. + + + + + Looks up a localized string similar to A user defined provider has not been specified.. + + + + + Looks up a localized string similar to User work buffers are not supported by this provider.. + + + + + Looks up a localized string similar to Vectors can not be empty and must have at least one element.. + + + + + Looks up a localized string similar to The given work array is too small. Check work[0] for the corret size.. + + +
+
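The single-pass variance and standard deviation estimators documented above ship with the MathNet.Numerics 3.16.0 package this diff adds, as extension methods in the `MathNet.Numerics.Statistics` namespace. A minimal sketch of the sample (N-1, Bessel-corrected) versus population (N) variants; the sample values are illustrative:

```csharp
using System;
using MathNet.Numerics.Statistics;

class StreamingStatisticsExample
{
    static void Main()
    {
        double[] samples = { 1.0, 2.0, 4.0, 7.0 };

        // Unbiased estimators: N-1 normalizer, NaN if fewer than two entries.
        double sampleVariance = samples.Variance();
        double sampleStdDev = samples.StandardDeviation();

        // Population evaluators: N normalizer, biased if applied to a subset.
        double populationVariance = samples.PopulationVariance();
        double populationStdDev = samples.PopulationStandardDeviation();

        // Combined single-pass estimate of mean and standard deviation
        // (returned as a Tuple in the 3.x API).
        var meanStdDev = samples.MeanStandardDeviation();

        Console.WriteLine("sample: s^2 = {0}, s = {1}", sampleVariance, sampleStdDev);
        Console.WriteLine("population: var = {0}, sd = {1}", populationVariance, populationStdDev);
        Console.WriteLine("single pass: mean = {0}, s = {1}", meanStdDev.Item1, meanStdDev.Item2);
    }
}
```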
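The `CommonParallel` helper documented above partitions an index range and invokes the body once per sub-range. It lives in `MathNet.Numerics.Threading`; the range-based `For` overload used below (start inclusive, end exclusive, body over a sub-range) is inferred from the summaries above, so treat this as a sketch rather than a verified signature:

```csharp
using System;
using MathNet.Numerics.Threading;

class CommonParallelExample
{
    static void Main()
    {
        var data = new double[1000000];
        for (int i = 0; i < data.Length; i++)
        {
            data[i] = i;
        }

        // Assumed range-based overload: the body receives a half-open
        // sub-range [fromInclusive, toExclusive) of the overall loop,
        // so writes to disjoint indices need no locking.
        CommonParallel.For(0, data.Length, (fromInclusive, toExclusive) =>
        {
            for (int i = fromInclusive; i < toExclusive; i++)
            {
                data[i] = data[i] * data[i];
            }
        });

        Console.WriteLine("data[10] = {0}", data[10]); // 100
    }
}
```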
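The trigonometry toolkit and windowing functions documented above are the static classes `Trig` and `Window` in the `MathNet.Numerics` namespace. A short sketch of the angle-unit conversions, the normalized sinc, and the symmetric versus periodic window variants (the window width of 16 is arbitrary):

```csharp
using System;
using MathNet.Numerics;

class TrigAndWindowExample
{
    static void Main()
    {
        // Angle-unit conversions: 90 degrees = 100 grad = pi/2 radians.
        double radians = Trig.DegreeToRadian(90.0);
        double grad = Trig.DegreeToGrad(90.0);
        Console.WriteLine("90 deg = {0} rad = {1} grad", radians, grad);

        // Normalized sinc: sinc(x) = sin(pi*x)/(pi*x).
        Console.WriteLine("sinc(0.5) = {0}", Trig.Sinc(0.5));

        // Symmetric windows suit filter design; periodic variants suit FFT use.
        double[] hamming = Window.Hamming(16);
        double[] hammingPeriodic = Window.HammingPeriodic(16);
        Console.WriteLine("w[0] = {0} (symmetric), {1} (periodic)", hamming[0], hammingPeriodic[0]);
    }
}
```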
diff --git a/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll new file mode 100644 index 0000000..2e97007 Binary files /dev/null and b/src/packages/MathNet.Numerics.3.16.0/lib/portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1/MathNet.Numerics.dll differ diff --git a/src/packages/MathNet.Numerics.3.16.0/license.txt b/src/packages/MathNet.Numerics.3.16.0/license.txt new file mode 100644 index 0000000..9297bd8 --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/license.txt @@ -0,0 +1,25 @@ +Math.NET Numerics License (MIT/X11) +=================================== + +Copyright (c) 2002-2015 Math.NET + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/packages/MathNet.Numerics.3.16.0/readme.txt b/src/packages/MathNet.Numerics.3.16.0/readme.txt new file mode 100644 index 0000000..242aa59 --- /dev/null +++ b/src/packages/MathNet.Numerics.3.16.0/readme.txt @@ -0,0 +1,511 @@ + __ __ _ _ _ _ ______ _______ + | \/ | | | | | | \ | | ____|__ __| + | \ / | __ _| |_| |__ | \| | |__ | | + | |\/| |/ _` | __| '_ \ | . ` | __| | | + | | | | (_| | |_| | | |_| |\ | |____ | | + |_| |_|\__,_|\__|_| |_(_)_| \_|______| |_| + + Math.NET Numerics - https://numerics.mathdotnet.com + Copyright (c) Math.NET - Open Source MIT/X11 License + + Math.NET Numerics v3.16.0 + +### 3.16.0 - 2017-01-03 +* Root Finding: improve accuracy handling *~Konstantin Tretyakov* +* Regression: GoodnessOfFit StandardError *~David Falkner* + +### 3.15.0 - 2016-12-27 +* FFT: MKL native provider backend. +* FFT: 2D and multi-dimensional FFT (only supported by MKL provider, managed provider pending). +* FFT: real conjugate-even FFT (only leveraging symmetry in MKL provider). +* FFT: managed provider significantly faster on x64. +* Linear Algebra: pointwise trigonometric and basic functions *~Albert Pang* +* Linear Algebra: better support for F# built-in operators (sqrt, sin, exp, ..) 
*~Albert Pang* +* Linear Algebra: pointwise power operator (F#) +* Linear Algebra: enable experimental matrix product implementation +* Linear Algebra: better support for matrix to/from row-major arrays and enumerables +* Linear Algebra: transpose allows specifying a result matrix to transpose into, inplace if square +* Linear Algebra: vector and matrix AsArray and similar to access internal arrays if applicable +* Linear Algebra: vector and matrix pointwise min/max and absmin/absmax +* Linear Algebra: dot-power on vectors and matrices, supporting native providers. +* Linear Algebra: matrix Moore-Penrose pseudo-inverse (SVD backed). +* Provider Control: separate Control classes for LA and FFT Providers. +* Provider Control: avoid internal exceptions on provider discovery. +* Distributions: fix misleading inline docs on Negative-Binomial. +* Generate: linear integer ranges +* Root Finding: extend zero-crossing bracketing in derivative-free algorithms. +* Window: periodic versions of Hamming, Hann, Cosine and Lanczos windows. +* Special Functions: more robust GammaLowerRegularizedInv (and Gamma.InvCDF). +* BUG: ODE Solver: fix bug in Runge-Kutta second order routine *~Ksero* + +### 3.13.1 - 2016-09-06 +* BUG: Random: Next(x,x+1) must always return x *~Juri* + +### 3.13.0 - 2016-08-18 +* Linear Algebra: faster tall, wide managed matrix multiplication. *~Aixile* +* Euclid: Integer Log2 (DeBruijn sequences algorithm). +* Integration: Gauss-Legendre documentation, cleanup. *~Larz White* +* Random: Integer sub-range sampling to use rejection sampling to avoid bias. +* Random: Improvements on integer and byte sampling. +* BUG: Random: CryptoRandomSource must not generate 1.0 samples. +* BUG: Statistics: fixed bug in WeightedPearson Correlation. *~Jon Smit* + +### 3.12.0 - 2016-07-03 +* ODE Solver: Runge-Kutta (order 2, 4) and Adams-Bashforth (order 1-4) algorithms *~Yoonku Hwang* +* Linear Algebra: faster multiplication of sparse with dense matrices *~Arthur* +* BUG: Integration: Gauss-Legendre on order 256 *~Sergey Kosukhin* +* BUG: Distributions: ChiSquared sampling was taking a square root where it should not *~Florian Wechsung* + +### 3.11.1 - 2016-04-24 +* BUG: Linear Algebra: sparse vector pointwise multiply/divide to itself +* BUG: Linear Algebra: Vector.ToVectorString if the first column is wider than maxWidth + +### 3.11.0 - 2016-02-13 +* Special Functions: error functions to use static coefficient arrays (perf) *~Joel Sleppy* +* Integration: Gauss-Legendre Rule (1D, 2D) *~Larz White* +* Complex: more robust magnitude and division for numbers close to MaxValue or Epsilon *~MaLiN2223* +* Native Providers: lazy default provider discovery & initialization *~Kuan Bartel* +* FSharp Package: Quaternion type *~Phil Cleveland* + +### 3.10.0 - 2015-12-30 +* Statistics: single-precision floating point support. +* Statistics: very limited support for int32 and complex numbers. +* Statistics: Min/Max Absolute, MagnitudePhase (complex). +* Statistics: FiveNumberSummary to use actual Median instead of R8 quantile. +* Linear Algebra: matrix Rank to use relative epsilon. +* Linear Algebra: extensions to convert between single/double precision, complex/real. +* Linear Algebra: Vector/Matrix storage DataContracts for ephemeral serialization. +* Regression: more helpful exceptions and messages. +* Random: 'Next' integer sampling no longer involves floating points, avoids one-off error in MersenneTwister. +* Precision: EpsilonOf for single-precision numbers, drop no longer needed portable fallbacks.
+ +### 3.9.0 - 2015-11-25 +* Distributions: Normal.CDF avoids problematic subtraction by using Erfc instead of Erf. +* Statistics: geometric and harmonic mean. +* Statistics: DataContracts for ephemeral serialization on RunningStatistics, DescriptiveStatistics and Histogram. +* BUG: Statistics: Histogram did not adjust lower bound correctly when value was equal to the bound *~Volker Breuer* +* Linear Algebra: minor optimization on how we call Array.Copy. +* BUG: Linear Algebra: fix bug in Complex and Complex32 SparseMatrix.ToTypeString. + +### 3.8.0 - 2015-09-26 +* Distributions: PDF and CDF more robust for large distribution parameters. +* Distributions: BetaScaled distribution. +* Distributions: method to create a PERT distribution (based on BetaScaled) *~John C Barstow* +* Distributions: Weibull.Estimate *~Jon Larborn* +* Random: NextBoolean extensions. +* Root Finding: RootFinding.Secant (based on NewtonRaphson) *~grovesNL* +* Linear Algebra: Matrix Rank calculation now uses a tolerance based on the matrix size. +* Linear Algebra: Alternative CreateMatrix/Vector functions with type parameter on functions instead of type. +* Linear Algebra: MKL LinearAlgebra provider requires at least native provider r9 (linear algebra v2.0). +* Native Providers: automatic handling of intermediate work arrays/buffers in MKL and OpenBLAS providers *~Marcus Cuda, Kuan Bartel* +* Native Providers: automatically use native provider if available. +* Native Providers: new Control.TryUse* to make it simpler to use providers if available but without failing if not. +* Native Providers: improved error state checking and handling *~Marcus Cuda, Kuan Bartel* +* Combinatorics: generate or select random permutation, combination or variation (shuffling) +* Finance: rename CompoundMonthlyReturn to CompoundReturn (old now obsolete). + +### 3.7.1 - 2015-09-10 +* BUG: Linear Algebra: fix optimized path of adding a sparse matrix to itself. + +### 3.7.0 - 2015-05-09 +* Statistics: RunningStatistics now propagates min/max on Combine, handles NaN on Push. +* Statistics: new MovingStatistics providing descriptive statistics over a moving window *~Marcus Cuda* +* Statistics: new Statistics.MovingAverage. +* Statistics: Improved Histogram handling of small-width buckets *~Justin Needham* +* Distributions: ChiSquare.InvCDF *~logophobia* +* FFT: Fourier.FrequencyScale to generate the frequency corresponding to each index in frequency space. +* BUG: FFT: fix Bluestein algorithm for sequences with more than 46341 samples but not power-of-two. +* Linear Algebra: SparseVector.AbsoluteMaximumIndex *~Matt Heffron* +* MKL Native Provider: OSX build script *~Marcus Cuda* +* MKL Native Provider: new combined NuGet package with a proper build target (no more manual file handling needed). +* OpenBLAS Native Provider: a new linear algebra provider using OpenBLAS *~Kuan Bartel* +* CUDA Native Provider: a new GPU-based linear algebra provider using Nvidia CUDA *~Matthew A. Johnson* +* Native Providers: now versioned separately for each kind (MKL, CUDA, OpenBLAS). 
+ +### 3.6.0 - 2015-03-22 +* Distributions: ChiSquare.CDF more robust for large numbers *~Baltazar Bieniek* +* Linear Algebra: MatrixStorage.Map2 equivalent to VectorStorage.Map2 +* Linear Algebra: Matrix and Vector Find/Find2, Exists/Exists2, ForAll/ForAll2 +* Linear Algebra: more consistent range checking in MatrixStorage.Clear and related +* Linear Algebra: mixed-storage fall back implementations now leverage higher-order functions +* BUG: Linear Algebra: fix loop range in MatrixStorage.ClearColumns (built-in storage not affected) +* BUG: Linear Algebra: fix sparse matrix equality. +* BUG: Linear Algebra: ArgumentException instead of index exception when trying to create an empty matrix. +* Generate: Unfold, Fibonacci; Normal and Standard replacing Gaussian and Stable. +* Native Providers: NativeProviderLoader to automatically load the provider for the matching processor architecture (x86, x64) *~Kuan Bartel* +* Native Providers: Control.NativeProviderPath allowing to explicitly declare where to load binaries from. +* MKL Native Provider: support for native complex eigen-value decomposition *~Marcus Cuda* +* MKL Native Provider: non-convergence checks in singular-value and eigen-value decompositions *~Marcus Cuda* + +### 3.5.0 - 2015-01-10 +* Differentiation: derivative, partial and mixed partial; hessian & jacobian *~Hythem Sidky* +* Differentiation: Differentiate facade class for simple use cases +* Differentiation: F# module for better F# function support. +* Linear Algebra: matrix ToRowArrays/ToColumnArrays +* Linear Algebra: F# insertRow, appendRow, prependRow and same also for columns +* Linear Algebra: F# append, stack and ofMatrixList2 +* Precision: measured machine epsilon, positive vs negative epsilon + +### 3.4.0 - 2015-01-04 +* Special Functions: Generalized Exponential Integral *~Ashley Messer* +* Special Functions: Regularized Incomplete Gamma domain extended to a=0 *~Ashley Messer* +* Statistics: weighted Pearson correlation *~ViK* +* MKL Native Provider: memory functions to free buffers and gather usage statistics *~Marcus Cuda* +* F#: depend on new official FSharp.Core NuGet package instead of FSharp.Core.Microsoft.Signed +* F#: simpler NuGet package dependencies (no more need for framework groups) +* Build: vagrant bootstrap now uses the latest xamarin mono packages + +### 3.3.0 - 2014-11-26 +* Linear Algebra: Vector.Fold2 (fold2 in F#), storage optimized +* Linear Algebra: Minor change how matrix products call the LA provider +* Linear Algebra: Random generation now leveraging array sampling routines +* BUG: Linear Algebra: fix bug when manually assigning System.Random to random distribution +* Root Finding: Change Brent tolerance check, add bracket check *~Hythen Sidky* +* Root Finding: Auto zero-crossing bracketing in FindRoots facade (not in algorithms) +* Statistics: RootMeanSquare (RMS) +* Distributions: Array sampling routines now available through interface +* Distributions: Categorical sampling now explicitly skips p=0 categories +* Generate: leverage array sampling routines for random data generation +* Generate: square, triangle and sawtooth waves +* Distance: Jaccard Index +* F#: explicitly depend on official FSharp.Core NuGet packages +* F#: NuGet package with iPython IfSharp F# module integration load script +* F#: load scripts with better packet support (and NuGet with -ExcludeVersion) +* Build: unified build.sh and buildn.sh into combined build.sh +* Build: use Paket instead of NuGet to maintain NuGet dependencies +* Build: for core add PCL profiles 7, 
78 and 259; for F# extensions drop PCL profile 328 + +### 3.2.3 - 2014-09-06 +* BUG: MatrixNormal distribution: fix density for non-square matrices *~Evelina Gabasova* + +### 3.2.2 - 2014-09-05 +* BUG: MatrixNormal distribution: density computation switched row and column covariance *~Evelina Gabasova* + +### 3.2.1 - 2014-08-05 +* Package fix: make sure .Net 3.5-only dependencies are not installed on .Net 4 and newer. + +### 3.2.0 - 2014-08-05 +* Linear Algebra: Vector.Map2 (map2 in F#), storage-optimized +* Linear Algebra: fix RemoveColumn/Row early index bound check (was not strict enough) +* Statistics: Entropy *~Jeff Mastry* +* Interpolation: use Array.BinarySearch instead of local implementation *~Candy Chiu* +* Resources: fix a corrupted exception message string +* Portable Build: support .Net 4.0 as well by using profile 328 instead of 344. +* .Net 3.5: F# extensions now support .Net 3.5 as well +* .Net 3.5: NuGet package now contains proper 3.5-only TPL package dependency; also in Zip package + +### 3.1.0 - 2014-07-20 +* Random: generate a sequence of integers within a range in one go +* Distributions: all distributions must have static routines to sample an array in one go +* Linear Algebra: fix Matrix.StrictlyLowerTriangle +* Linear Algebra: fix vector DoOuterProduct *~mjmckp* +* Linear Algebra: enumerators accept Zeros-parameter (like map/fold already does) +* Linear Algebra: Vector.MapConvert (consistency) +* Linear Algebra: proper term for 'conjugate symmetric' is 'Hermitian' +* Interpolation: new Step, LogLinear and transformed interpolators *~Candy Chiu* +* Interpolation: check for min required number of data points, throw ArgumentException if not. +* Root Finding: F# FindRoots.broyden module function *~teramonagi* +* Misc docs fixes + +### 3.0.2 - 2014-06-26 +* BUG: fixing a bug in Matrix.RemoveRow range checks. + +### 3.0.1 - 2014-06-24 +* BUG: fixing a bug in new Matrix.ToMatrixString and Vector.ToVectorString routines. + +### 3.0.0 - 2014-06-21 +* First stable v3 release: + * [Upgrade Notes](https://github.com/mathnet/mathnet-numerics/wiki/Upgrading-to-Version-3) + * Stable API, no more breaking changes for all future v3 releases (except previews). + * Finally unlocks development and contributions around non-linear optimization and native providers over the next few minor releases. +* Native Providers: option to control max number of threads used by MKL. +* F#: Fit.multiDim; Matrix.qr, svd, eigen, lu and cholesky. + +### 3.0.0-beta05 - 2014-06-20 +* 2nd Candidate for v3.0 Release +* BUG: Distance: fix bug in Hamming distance that skipped the first pair. +* F#: packages now include a MathNet.Numerics.fsx script that includes FSI printers and references the assemblies. +* Linear Algebra: improved matrix and vector ToString formatting, more compact, adaptive to actual numbers. +* Linear Algebra: CoerceZero for matrix and vector to replace small numbers with zero. +* Regression: DirectRegressionMethod option to specify as argument which direct method should be used. +* Control: drop MaxToStringRows/Columns properties (no longer used) +* Random: clarify bad randomness properties of SystemRandomSource.FastDoubles (trade off) + +### 3.0.0-beta04 - 2014-06-16 +* Candidate for v3.0 Release +* Linear Algebra: + * FoldRows renamed to FoldByRow, now operates on and returns arrays; same for columns. 
**Breaking.** + * New FoldRows and ReduceRows that operate on row vectors; same for columns + * Split Map into Map and MapConvert (allows optimization in common in-place case) + * Row and column sums and absolute-sums + * F# DiagonalMatrix module to create diagonal matrices without using the builder + * F# Matrix module extended with sumRows, sumAbsRows, normRows; same for columns +* Build: extend build and release automation, automatic releases also for data extensions and native providers + +### 3.0.0-beta03 - 2014-06-05 +* Linear Algebra: vector outer product now follows common style, supports explicit result argument, more efficient. +* Interpolation: must not modify/sort original data; alternative Sorted and Inplace functions. +* Distributions: static IsValidParameterSet functions. +* Distributions: all distributions are now immutable in their distribution parameters. **Breaking.** +* NuGet: attempt to create proper symbol+source packages on symbolsource; primary packages smaller, w/o pdbs +* Build: skip long tests with new "quick" argument (FAKE) +* Build: clearing is more explicit, fixes most locking issues if solution is also open in IDE. +* Build: automated publishing docs, api, git release tag (maintainer) + +### 3.0.0-beta02 - 2014-05-29 +* Linear Algebra: + * optimized sparse-sparse and sparse-diagonal matrix products. *~Christian Woltering* + * transpose at storage level, optimized sparse transpose. *~Christian Woltering* + * optimized inplace-map, indexed submatrix-map. + * optimized clearing a set of rows or columns. + * matrix FoldRows/FoldColumns. + * matrix column/row norms, normalization. + * prefer enums over boolean parameters (e.g. `Zeros.AllowSkip`). + * IsSymmetric is now a method, add IsConjugateSymmetric. **Breaking.** + * Eigenvalue decomposition can optionally skip symmetry test. + * Direct diagonal-scalar division implementation +* Test Functions: Rosenbrock, Rastrigin, DropWave, Ackley, Bohachevsky, Matyas, SixHumpCamel, Himmelblau +* Statistics: DescriptiveStatistics support for larger datasets. +* MKL: native providers must not require MFC to compile. +* Sorting helpers support sub-range sorting, use insertion sort on very small sets. +* Build: extend usage of FAKE, automate docs, api, Zip and NuGet package generation. +* Portable: replace PCL profile 136 with profile 344, support for WP8.1 +* Xamarin: prepare for better Xamarin Android/iOS support and for adding to the Xamarin store (free). +* Misc code style fixes. +* Update Vagrant setup to official Ubuntu 14.04 LTS box and proper apt-style Mono+F# provisioning. + +### 3.0.0-beta01 - 2014-04-01 +* See also: [Roadmap](https://sdrv.ms/17wPFlW) and [Towards Math.NET Numerics Version 3](http://christoph.ruegg.name/blog/towards-mathnet-numerics-v3.html). +* **Major release with breaking changes** +* All obsolete code has been removed +* Reworked redundancies, inconsistencies and unfortunate past design choices. +* Significant namespace simplifications (-30%). +* Linear Algebra: + * Favor and optimize for generic types, e.g. `Vector`. + * Drop the `.Generic` in the namespaces and flattened solver namespaces. + * F#: all functions in the modules now fully generic, including the `matrix` function. + * F#: `SkipZeros` instead of the cryptic `nz` suffix for clarity. + * Add missing scalar-matrix routines. + * Optimized mixed dense-diagonal and diagonal-dense operations (500x faster on 250k set). + * More reasonable choice of return structure on mixed operations (e.g. dense+diagonal). 
+ * Add point-wise infix operators `.*`, `./`, `.%` where supported (F#) + * Vectors explicitly provide proper L1, L2 and L-infinity norms. + * All norms return the result as double (instead of the specific value type of the matrix/vector). + * Matrix L-infinity norm now cache-optimized (8-10x faster). + * Vectors have a `ConjugateDotProduct` in addition to `DotProduct`. + * `Matrix.ConjugateTransposeAndMultiply` and variants. + * Matrix Factorization types fully generic, easily accessed by new `Matrix` member methods (replacing the extension methods). Discrete implementations no longer visible. + * QR factorization is thin by default. + * Matrix factorizations no longer clone their results at point of access. + * Add direct factorization-based `Solve` methods to matrix type. + * Massive iterative solver implementation/design simplification, now mostly generic and a bit more functional-style. + * Renamed iterative solver stop criteria from 'criterium' to 'criterion'. + * New MILU(0) iterative solver preconditioner that is much more efficient and fully leverages sparse data. *~Christian Woltering* + * Matrices/Vectors now have more consistent enumerators, with a variant that skips zeros (useful if sparse). + * Matrix/Vector creation routines have been simplified and usually no longer require explicit dimensions. New variants to create diagonal matrices, or such where all fields have the same value. All functions that take a params array now have an overload accepting an enumerable (e.g. `OfColumnVectors`). + * Generic Matrix/Vector creation using builders, e.g. `Matrix.Build.DenseOfEnumerable(...)` + * Create a matrix from a 2D-array of matrices (top-left aligned within the grid). + * Create a matrix or vector with the same structural type as an example (`.Build.SameAs(...)`) + * Removed non-static Matrix/Vector.CreateMatrix/CreateVector routines (no longer needed) + * Add Vector.OfArray (copying the array, consistent with Matrix.OfArray - you can still use the dense vector constructor if you want to use the array directly without copying). + * More convenient and one more powerful overload of `Matrix.SetSubMatrix`. + * Matrices/Vectors expose whether storage is dense with a new IsDense property. + * Various minor performance work. + * Matrix.ClearSubMatrix no longer throws on 0 or negative col/row count (nop) + * BUG: Fix bug in routine to copy a vector into a sub-row of a matrix. + * Both canonical modulus and remainder operations on matrices and vectors. + * Matrix kernel (null space) and range (column space) + * Storage-aware non-inplace functional map on vectors and matrices + * Pointwise power, exponential and natural logarithm for vectors and matrices. + * Matrix positive-integer power + * Matrix RemoveRow/RemoveColumn; more efficient InsertRow/InsertColumn +* Native Linear Algebra/Intel MKL: + * Thin QR factorization uses MKL if enabled for all types (previously just `double`) + * Sparse matrix CSR storage format now uses the much more common row pointer convention and is fully compatible with MKL (so there is nothing in the way to add native provider support). + * Providers have been moved to a `Providers` namespace and are fully generic again. + * Simpler provider usage: `Control.UseNativeMKL()`, `Control.UseManaged()`. + * MKL native provider now supports capability querying (so we can extend it much more reliably without breaking your code). + * MKL native provider consistency, precision and accuracy now configurable (trade-off). 
+ * Native Provider development has been reintegrated into the main repository; we can now directly run all unit tests against local native provider builds. Covered by FAKE builds. +* Statistics: + * Pearson and Spearman correlation matrix of a set of arrays. + * Spearman ranked correlation optimized (4x faster on 100k set) + * Skewness and PopulationSkewness; Kurtosis and PopulationKurtosis. + * Single-pass `MeanVariance` and `MeanStandardDeviation` methods (often used together). + * Some overloads for single-precision values. + * Add `Ranks`, `QuantileRank` and `EmpiricalCDF`. + * F# module for higher order functions. + * Median direct implementation (instead of R8-compatible 0.5-quantile) + * New RunningStatistics that can be updated and merged + * BUG: DescriptiveStatistics must return NaN if not enough data for a specific statistic. +* Probability Distributions: + * Direct static distributions functions (PDF, CDF, sometimes also InvCDF). + * Direct static sample functions, including such to fill an existing array in one call. + * New Trigangular distribution *~Superbest, David Prince* + * Add InvCDF to Gamma, Student-T, FisherSnedecor (F), and Beta distributions. + * Major API cleanup, including xml docs + * Xml doc and ToString now use well-known symbols for the parameters. + * Maximum-likelihood parameter estimation for a couple distributions. + * All constructors now optionally accept a random source as last argument. + * Use less problematic RNG-seeds by default, if no random source is provided. + * Simpler and more composable random sampling from distributions. + * Much more distribution's actual sample distribution is verified in tests (all continuous, most discrete). + * Binomial.CDF now properly leverages BetaRegularized. + * BUG: Fix hyper-geometric CDF semantics, clarify distribution parameters. + * BUG: Fix Zipf CDF at x=1. + * BUG: Fix Geometric distribution sampling. + * BUG: Fix Categorical distribution properties. *~David Prince* +* Random Numbers: + * All RNGs provide static Sample(values) functions to fill an existing array. + * Thread-safe System.Random available again as `SystemRandomSource`. + * Fast and simple to use static `SystemRandomSource.Doubles` routine with lower randomness guarantees. + * Shared `SystemRandomSource.Default` and `MersenneTwister.Default` instances to skip expensive initialization. + * Using thread-safe random source by default in distributions, Generate, linear algebra etc. + * Tests always use seeded RNGs for reproducability. + * F#: direct sampling routines in the `Random` module, also including default and shared instances. +* Linear Regression: + * Reworked `Fit` class, supporting more simple scenarios. + * New `.LinearRegression` namespace with more options. + * Better support for simple regression in multiple dimensions. + * Goodness of Fit: R, RSquared *~Ethar Alali* + * Weighted polynomial and multi-dim fitting. + * Use more efficient LA routines *~Thomas Ibel* +* Interpolation: + * Return tuples instead of out parameter. + * Reworked splines, drop complicated and limiting inheritance design. More functional approach. + * More efficient implementation for non-cubic splines (especially linear spline). + * `Differentiate2` instead of `DifferentiateAll`. + * Definite `Integrate(a,b)` in addition to existing indefinite `Integrate(t)`. + * Use more common names in `Interpolate` facade, e.g. "Spline" is a well known name. +* Root Finding: Chebychev polynomial roots. +* Root Finding: Cubic polynomials roots. 
*~Candy Chiu* +* Trig functions: common short names instead of very long names. Add sinc function. +* Excel functions: TDIST, TINV, BETADIST, BETAINV, GAMMADIST, GAMMAINV, NORMDIST, NORMINV, NORMSDIST, NORMSINV QUARTILE, PERCENTILE, PERCENTRANK. +* Special functions: BetaRegularized more robust for large arguments. +* Special functions: new `GammaLowerRegularizedInv`. +* New distance functions in `Distance`: euclidean, manhattan, chebychev distance of arrays or generic vectors. SAD, MAE, SSD, MSE metrics. Pearson's, Canberra and Minkowski distance. Hamming distance. +* Windows: ported windowing functions from Neodym (Hamming, Hann, Cosine, Lanczos, Gauss, Blackmann, Bartlett, ...) +* BigInteger factorial +* Build: + * FAKE-based build (in addition to existing Visual Studio solutions) to clean, build, test, document and package independently of the CI server. + * Finally proper documentation using FSharp.Formatting with sources included in the repository so it is versioned and can be contributed to with pull requests. + * NuGet packages now also include the PCL portable profile 47 (.Net 4.5, Silverlight 5, Windows 8) in addition to the normal .Net 4.0 build and PCL profile 136 (.Net 4.0, WindowsPhone 8, Silverlight 5, Windows 8) as before. Profile 47 uses `System.Numerics` for complex numbers, among others, which is not available in profile 136. + * NuGet packages now also include a .Net 3.5 build of the core library. + * IO libraries have been removed, replaced with new `.Data` packages (see list on top). + * Alternative strong-named versions of more NuGet packages (mostly the F# extensions for now), with the `.Signed` suffix. + * Reworked solution structure so it works in both Visual Studio 11 (2012) and 12 (2013). + * We can now run the full unit test suite against the portable builds as well. + * Builds should now also work properly on recent Mono on Linux (including F# projects). + * Fixed builds on platforms with case sensitive file systems. *~Gauthier Segay* +* Integration: simplification of the double-exponential transformation api design. +* FFT: converted to static class design and shorter names for simpler usage. Drop now redundant `Transform` class. +* Generate: ported synthetic data generation and sampling routines from Neodym (includes all from old Signals namespace). F# module for higher order functions. +* Euclid: modulus vs remainder (also BigInteger), integer theory (includes all from old NumberTheory namespace). +* Complex: common short names for Exp, Ln, Log10, Log. +* Complex: fix issue where a *negative zero* may flip the sign in special cases (like `Atanh(2)`, where incidentally MATLAB and Mathematica do not agree on the sign either). +* Complex: routines to return all two square and three cubic roots of a complex number. +* Complex: More robust complex Asin/Acos for large real numbers. +* Evaluate: routine to evaluate complex polynomials, or real polynomials at a complex point. +* CommonParallel now also supported in .Net 3.5 and portable profiles; TaskScheduler can be replaced with custom implementation *~Thomas Ibel* +* F# BigRational type cleaned up and optimized *~Jack Pappas* +* F# BigRational IsZero, IsOne, IsInteger, create from fraction. +* F# BigRational Reciprocal, Power operator support (**), support for negative integer powers. +* F# functions now use the clearer `Func` suffix instead of just `F` if they return a function. +* Precision: reworked, now much more consistent. 
**If you use `AlmostEqual` with numbers-between/ULP semantics, please do review your code to make sure you're still using the expected variant!**. If you use the decimal-places semantics, you may need to decrement the digits argument to get the same behavior as before. +* Much less null checks, our code generally only throws `ArgumentNullException` if an unexpected null argument would *not* have caused an immediate `NullReferenceException`. +* Cases where `ArgumentOutOfRangeExceptions` where thrown with wrong arguments (i.e. no parameter name) now throw `ArgumentException` instead. +* Tests now have category attributes (to selectively run or skip categories). + +### 2.6.2 - 2013-10-21 +* Patch release, fixing the NuGet package to work better in WindowsPhone 8 projects. Assemblies are not changed. + +### 2.6.1 - 2013-08-13 +* BUG: fixing a bug in `ArrayStatistics.Variance` on arrays longer than 46341 entries. + +### 2.6.0 - 2013-07-26 +* See also: [What's New in Math.NET Numerics 2.6](http://christoph.ruegg.name/blog/new-in-mathnet-numerics-2-6.html) +* Linear Curve Fitting: Linear least-squares fitting (regression) to lines, polynomials and linear combinations of arbitrary functions. Multi-dimensional fitting. Also works well in F# with the F# extensions. +* Root Finding: + * Brent's method. *~Candy Chiu, Alexander Täschner* + * Bisection method. *~Scott Stephens, Alexander Täschner* + * Broyden's method, for multi-dimensional functions. *~Alexander Täschner* + * Newton-Raphson method. + * Robust Newton-Raphson variant that tries to recover automatically in cases where it would fail or converge too slowly. This modification makes it more robust e.g. in the presence of singularities and less sensitive to the search range/interval. + * All algorithms support a TryFind-pattern which returns success instead of throwing an exception. + * Special case for quadratic functions, in the future to be extended e.g. to polynomials. + * Basic bracketing algorithm + * Also works well in F# with the F# extensions. +* Linear Algebra: + * Native eigenvalue decomposition (EVD) support with our MKL packages *~Marcus Cuda* + * Add missing scalar-vector operations (s-v, s/v, s%v) *~Thomas Ibel* + * Support for new F# 3.1 row/column slicing syntax on matrices + * Matrices learned proper OfColumn/RowVectors, analog also in F#. + * Documentation Fixes *~Robin Neatherway* + * BUG: Fixed exception text message when creating a matrix from enumerables (rows vs columns) *~Thomas Ibel* + * We're phasing out MathNet.Numerics.IO that used to be included in the main package for matrix file I/O for text and Matlab formats. Use the new .Data.Text and .Data.Matlab packages instead. +* Statistics: Spearman Rank Correlation Coefficient *~Iain McDonald* +* Statistics: Covariance function, in Array-, Streaming- and common Statistics. +* Distributions: Categorical: distribution more consistent, no longer requires normalized pdf/cdf parameters +* Distributions: Categorical: inverse CDF function *~Paul Varkey* +* BUG: Distributions: Fixed static sampling methods of the `Stable` distribution. *~Artyom Baranovskiy* +* BUG: Fixed a bug in the Gamma Regularized special function where in some cases with large values it returned 1 instead of 0 and vice versa. +* The F# extensions now have a strong name in (and only in) the signed package as well (previously had not been signed). *~Gauthier Segay* +* Evaluate.Polynomial with new overload which is easier to use. +* Fixed a couple badly designed unit tests that failed on Mono. 
+* Repository now Vagrant-ready for easy testing against recent Mono on Debian. + +### 2.5.0 - 2013-04-14 +* See also: [What's New in Math.NET Numerics 2.5](http://christoph.ruegg.name/blog/new-in-mathnet-numerics-2-5.html) +* Statistics: Empty statistics now return NaN instead of either 0 or throwing an exception. *This may break code in case you relied upon the previous unusual and inconsistent behavior.* +* Linear Algebra: More reasonable ToString behavior for matrices and vectors. *This may break code if you relied upon ToString to export your full data to text form intended to be parsed again later. Note that the classes in the MathNet.Numerics.IO library are more appropriate for storing and loading data.* +* Statistics: + * More consistent behavior for empty and single-element data sets: Min, Max, Mean, Variance, Standard Deviation etc. no longer throw exceptions if the data set is empty but instead return NaN. Variance and Standard Deviation will also return NaN if the set contains only a single entry. Population Variance and Population Standard Deviation will return 0 in this case. + * Reworked order statistics (Quantile, Quartile, Percentile, IQR, Fivenum, etc.), now much easier to use and supporting compatibility with all 9 R-types, Excel and Mathematica. The obsolete Percentile class now leverages the new order statistics, fixing a range check bug as side effect. + * New Hybrid Monte Carlo sampler for multivariate distributions. *~manyue* + * New financial statistics: absolute risk and return measures. *~Phil Cleveland* + * Explicit statistics for sorted arrays, unsorted arrays and sequences/streams. Faster algorithms on sorted data, also avoids multiple enumerations. + * Some statistics like Quantile or empirical inverse CDF can optionally return a parametric function when multiple evaluations are needed, like for plotting. +* Linear Algebra: + * More reasonable ToString behavior for matrices and vectors: `ToString` methods no longer render the whole structure to a string for large data, among others because they used to wreak havoc in debugging and interactive scenarios like F# FSI. Instead, ToString now only renders an excerpt of the data, together with a line about dimension, type and in case of sparse data a sparseness indicator. The intention is to give a good idea about the data in a visually useful way. How much data is shown can be adjusted in the Control class. See also ToTypeString and ToVector/MatrixString. + * Performance: reworked and tuned common parallelization. Some operations are up to 3 magnitudes faster in some extreme cases. Replaced copy loops with native routines. More algorithms are storage-aware (and should thus perform better especially on sparse data). *~Thomas Ibel, Iain McDonald, Marcus Cuda* + * Fixed range checks in the Thin-QR decomposition. *~Marcus Cuda* + * BUG: Fixed bug in Gram Schmidt for solving tall matrices. *~Marcus Cuda* + * Vectors now implement the BCL IList interfaces (fixed-length) for better integration with existing .Net code. *~Scott Stephens* + * Matrix/Vector parsing has been updated to be able to parse the new visual format as well (see ToMatrixString). + * DebuggerDisplay attributes for matrices and vectors. + * Map/IndexedMap combinators with storage-aware and partially parallelized implementations for both dense and sparse data. + * Reworked Matrix/Vector construction from arrays, enumerables, indexed enumerables, nested enumerables or by providing an init function/lambda. 
Non-obsolete constructors now always use the raw data array directly without copying, while static functions always return a matrix/vector independent of the provided data source. + * F#: Improved extensions for matrix and vector construction: create, zeroCreate, randomCreate, init, ofArray2, ofRows/ofRowsList, ofColumns/ofColumnsList, ofSeqi/Listi (indexed). Storage-aware for performance. + * F#: Updated map/mapi and other combinators to leverage core implementation, added -nz variants where zero-values may be skipped (relevant mostly for sparse matrices). + * F#: Idiomatic slice setters for sub-matrices and sub-vectors + * F#: More examples for matrix/vector creation and linear regression in the F# Sample-package. +* Control: Simpler usage with new static ConfigureAuto and ConfigureSingleThread methods. Resolved misleading configuration logic and naming around disabling parallelization. +* Control: New settings for linear algebra ToString behavior. +* Fixed range check in the Xor-shift pseudo-RNG. +* Parallelization: Reworked our common logic to avoid expensive lambda calls in inner loops. Tunable. +* F#: Examples (and thus the NuGet Sample package) are now F# scripts prepared for experimenting interactively in FSI, instead of normal F# files. Tries to get the assembly references right for most users, both within the Math.NET Numerics solution and the NuGet package. +* Various minor improvements on consistency, performance, tests, xml docs, obsolete attributes, redundant code, argument checks, resources, cleanup, nuget, etc. + +### 2.4.0 - 2013-02-03 +* Drops the dependency on the zlib library. We thus no longer have any dependencies on other packages. *~Marcus Cuda, Thomas Ibel* +* Adds Modified Bessel & Struve special functions *~Wei Wu* +* BUG: Fixes a bug in our iterative kurtosis statistics formula *~Artyom Baranovskiy* +* Linear Algebra: Performance work, this time mostly around accessing matrix rows/columns as vectors. Opting out from targeted patching in our matrix and vector indexers to allow inlining. +* Linear Algebra: Fixes an issue around Thin-QR solve *~Marcus Cuda* +* Linear Algebra: Simplifications around using native linear algebra providers (see Math.NET Numerics With Native Linear Algebra) +* F#: Adds the BigRational module from the F# PowerPack, now to be maintained here instead. *~Gustavo Guerra* +* F#: Better support for our Complex types (close to the F# PowerPack Complex type) *~Gustavo Guerra* + +### 2.3.0 - 2013-11-25 +* Portable Library: Adds support for WP8 (.Net 4.0 and higher, SL5, WP8 and .NET for Windows Store apps) +* Portable Library: New: portable build also for F# extensions (.Net 4.5, SL5 and .NET for Windows Store apps) +* Portable Library: NuGet: portable builds are now included in the main packages, no more need for special portable packages +* Linear Algebra: Continued major storage rework, in this release focusing on vectors (previous release was on matrices) +* Linear Algebra: Thin QR decomposition (in addition to existing full QR) +* Linear Algebra: Static CreateRandom for all dense matrix and vector types +* Linear Algebra: F#: slicing support for matrices and vectors +* Distributions: Consistent static Sample methods for all continuous and discrete distributions (was previously missing on a few) +* F#: better usability for random numbers and distributions. +* F# extensions are now using F# 3.0 +* Updated Intel MKL references for our native linear algebra providers +* Various bug, performance and usability fixes